diff --git a/sve_api/out_GenerateHWIntrinsicTests_Arm.cs b/sve_api/out_GenerateHWIntrinsicTests_Arm.cs
new file mode 100644
index 0000000000000..c58457655bca6
--- /dev/null
+++ b/sve_api/out_GenerateHWIntrinsicTests_Arm.cs
@@ -0,0 +1,2951 @@
+
+// Sve stores
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_float", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_double", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_sbyte", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_short", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_int", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_long", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_byte", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_ushort", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_uint", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_ulong", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_float", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector_Vector",["Op3BaseType"] = "Single_Single",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_double", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector_Vector",["Op3BaseType"] = "Double_Double",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_sbyte", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector_Vector",["Op3BaseType"] = "SByte_SByte",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_short", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector_Vector",["Op3BaseType"] = "Int16_Int16",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_int", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector_Vector",["Op3BaseType"] = "Int32_Int32",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_long", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector_Vector",["Op3BaseType"] = "Int64_Int64",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_byte", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector_Vector",["Op3BaseType"] = "Byte_Byte",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_ushort", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector_Vector",["Op3BaseType"] = "UInt16_UInt16",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_uint", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector_Vector",["Op3BaseType"] = "UInt32_UInt32",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_ulong", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector_Vector",["Op3BaseType"] = "UInt64_UInt64",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_float", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector_Vector_Vector",["Op3BaseType"] = "Single_Single_Single",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_double", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector_Vector_Vector",["Op3BaseType"] = "Double_Double_Double",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_sbyte", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector_Vector_Vector",["Op3BaseType"] = "SByte_SByte_SByte",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_short", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector_Vector_Vector",["Op3BaseType"] = "Int16_Int16_Int16",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_int", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector_Vector_Vector",["Op3BaseType"] = "Int32_Int32_Int32",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_long", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector_Vector_Vector",["Op3BaseType"] = "Int64_Int64_Int64",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_byte", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector_Vector_Vector",["Op3BaseType"] = "Byte_Byte_Byte",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_ushort", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector_Vector_Vector",["Op3BaseType"] = "UInt16_UInt16_UInt16",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_uint", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector_Vector_Vector",["Op3BaseType"] = "UInt32_UInt32_UInt32",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_ulong", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector_Vector_Vector",["Op3BaseType"] = "UInt64_UInt64_UInt64",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_float", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector_Vector_Vector_Vector",["Op3BaseType"] = "Single_Single_Single_Single",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_double", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector_Vector_Vector_Vector",["Op3BaseType"] = "Double_Double_Double_Double",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_sbyte", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector_Vector_Vector_Vector",["Op3BaseType"] = "SByte_SByte_SByte_SByte",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_short", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector_Vector_Vector_Vector",["Op3BaseType"] = "Int16_Int16_Int16_Int16",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_int", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector_Vector_Vector_Vector",["Op3BaseType"] = "Int32_Int32_Int32_Int32",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_long", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector_Vector_Vector_Vector",["Op3BaseType"] = "Int64_Int64_Int64_Int64",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_byte", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector_Vector_Vector_Vector",["Op3BaseType"] = "Byte_Byte_Byte_Byte",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_ushort", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector_Vector_Vector_Vector",["Op3BaseType"] = "UInt16_UInt16_UInt16_UInt16",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_uint", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector_Vector_Vector_Vector",["Op3BaseType"] = "UInt32_UInt32_UInt32_UInt32",["LargestVectorSize"] = "8",}),
+ ("SveStore.template", new Dictionary<string, string> { ["TestName"] = "SveStore_ulong", ["Isa"] = "Sve", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector_Vector_Vector_Vector",["Op3BaseType"] = "UInt64_UInt64_UInt64_UInt64",["LargestVectorSize"] = "8",}),
+ ("SveStoreNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNarrowing_short", ["Isa"] = "Sve", ["Method"] = "StoreNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNarrowing_int", ["Isa"] = "Sve", ["Method"] = "StoreNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNarrowing_long", ["Isa"] = "Sve", ["Method"] = "StoreNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNarrowing_ushort", ["Isa"] = "Sve", ["Method"] = "StoreNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNarrowing_uint", ["Isa"] = "Sve", ["Method"] = "StoreNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNarrowing_ulong", ["Isa"] = "Sve", ["Method"] = "StoreNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNarrowing_int", ["Isa"] = "Sve", ["Method"] = "StoreNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNarrowing_long", ["Isa"] = "Sve", ["Method"] = "StoreNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNarrowing_uint", ["Isa"] = "Sve", ["Method"] = "StoreNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNarrowing_ulong", ["Isa"] = "Sve", ["Method"] = "StoreNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNarrowing_long", ["Isa"] = "Sve", ["Method"] = "StoreNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNarrowing_ulong", ["Isa"] = "Sve", ["Method"] = "StoreNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNonTemporal.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNonTemporal_float", ["Isa"] = "Sve", ["Method"] = "StoreNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNonTemporal.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNonTemporal_double", ["Isa"] = "Sve", ["Method"] = "StoreNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNonTemporal.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNonTemporal_sbyte", ["Isa"] = "Sve", ["Method"] = "StoreNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNonTemporal.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNonTemporal_short", ["Isa"] = "Sve", ["Method"] = "StoreNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNonTemporal.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNonTemporal_int", ["Isa"] = "Sve", ["Method"] = "StoreNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNonTemporal.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNonTemporal_long", ["Isa"] = "Sve", ["Method"] = "StoreNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNonTemporal.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNonTemporal_byte", ["Isa"] = "Sve", ["Method"] = "StoreNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNonTemporal.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNonTemporal_ushort", ["Isa"] = "Sve", ["Method"] = "StoreNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNonTemporal.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNonTemporal_uint", ["Isa"] = "Sve", ["Method"] = "StoreNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}),
+ ("SveStoreNonTemporal.template", new Dictionary<string, string> { ["TestName"] = "SveStoreNonTemporal_ulong", ["Isa"] = "Sve", ["Method"] = "StoreNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}),
+
+
+// Sve scatterstores
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_float_uint", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_int_uint", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_uint_uint", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_double_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_long_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_float_int", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Single",["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_int_int", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int32",["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_uint_int", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt32",["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_float_uint", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Single",["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_int_uint", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int32",["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_uint_uint", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt32",["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_double_long", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Double",["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_long_long", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_ulong_long", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_double_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Double",["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_long_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}),
+ ("SveScatter.template", new Dictionary<string, string> { ["TestName"] = "SveScatter_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveScatter16BitNarrowing_int_uint", ["Isa"] = "Sve", ["Method"] = "Scatter16BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveScatter16BitNarrowing_uint", ["Isa"] = "Sve", ["Method"] = "Scatter16BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveScatter16BitNarrowing_long_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter16BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveScatter16BitNarrowing_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter16BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_int", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int32",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_uint_int", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt32",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_int_uint", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int32",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_uint", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt32",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_long", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_ulong_long", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_long_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_int", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int32",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_uint_int", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt32",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_int_uint", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int32",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_uint", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt32",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_long", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_ulong_long", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_long_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}),
+ ("SveScatter16BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter16BitWithByteOffsetsNarrowing_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}),
+ ("SveScatter32BitNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveScatter32BitNarrowing_long_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter32BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveScatter32BitNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveScatter32BitNarrowing_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter32BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}),
+ ("SveScatter32BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter32BitWithByteOffsetsNarrowing_long", ["Isa"] = "Sve", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}),
+ ("SveScatter32BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter32BitWithByteOffsetsNarrowing_ulong_long", ["Isa"] = "Sve", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}),
+ ("SveScatter32BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter32BitWithByteOffsetsNarrowing_long_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}),
+ ("SveScatter32BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter32BitWithByteOffsetsNarrowing_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}),
+ ("SveScatter32BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter32BitWithByteOffsetsNarrowing_long", ["Isa"] = "Sve", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}),
+ ("SveScatter32BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter32BitWithByteOffsetsNarrowing_ulong_long", ["Isa"] = "Sve", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}),
+ ("SveScatter32BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter32BitWithByteOffsetsNarrowing_long_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}),
+ ("SveScatter32BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter32BitWithByteOffsetsNarrowing_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}),
+ ("SveScatter8BitNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveScatter8BitNarrowing_int_uint", ["Isa"] = "Sve", ["Method"] = "Scatter8BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}),
+ ("SveScatter8BitNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveScatter8BitNarrowing_uint", ["Isa"] = "Sve", ["Method"] = "Scatter8BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}),
+ ("SveScatter8BitNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveScatter8BitNarrowing_long_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter8BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveScatter8BitNarrowing.template", new Dictionary<string, string> { ["TestName"] = "SveScatter8BitNarrowing_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter8BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}),
+ ("SveScatter8BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter8BitWithByteOffsetsNarrowing_int", ["Isa"] = "Sve", ["Method"] = "Scatter8BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int32",["LargestVectorSize"] = "8",}),
+ ("SveScatter8BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter8BitWithByteOffsetsNarrowing_uint_int", ["Isa"] = "Sve", ["Method"] = "Scatter8BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt32",["LargestVectorSize"] = "8",}),
+ ("SveScatter8BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter8BitWithByteOffsetsNarrowing_int_uint", ["Isa"] = "Sve", ["Method"] = "Scatter8BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int32",["LargestVectorSize"] = "8",}),
+ ("SveScatter8BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter8BitWithByteOffsetsNarrowing_uint", ["Isa"] = "Sve", ["Method"] = "Scatter8BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt32",["LargestVectorSize"] = "8",}),
+ ("SveScatter8BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter8BitWithByteOffsetsNarrowing_long", ["Isa"] = "Sve", ["Method"] = "Scatter8BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}),
+ ("SveScatter8BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter8BitWithByteOffsetsNarrowing_ulong_long", ["Isa"] = "Sve", ["Method"] = "Scatter8BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}),
+ ("SveScatter8BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter8BitWithByteOffsetsNarrowing_long_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter8BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}),
+ ("SveScatter8BitWithByteOffsetsNarrowing.template",new Dictionary<string, string> {["TestName"] = "SveScatter8BitWithByteOffsetsNarrowing_ulong", ["Isa"] = "Sve", ["Method"] = "Scatter8BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}),
+
+
+// Sve maths
+ ("SveAbs.template", new Dictionary<string, string> { ["TestName"] = "SveAbs_float", ["Isa"] = "Sve", ["Method"] = "Abs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}),
+ ("SveAbs.template", new Dictionary<string, string> { ["TestName"] = "SveAbs_double", ["Isa"] = "Sve", ["Method"] = "Abs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}),
+ ("SveAbs.template", new Dictionary<string, string> { ["TestName"] = "SveAbs_sbyte", ["Isa"] = "Sve", ["Method"] = "Abs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}),
+ ("SveAbs.template", new Dictionary<string, string> { ["TestName"] = "SveAbs_short", ["Isa"] = "Sve", ["Method"] = "Abs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}),
+ ("SveAbs.template", new Dictionary<string, string> { ["TestName"] = "SveAbs_int", ["Isa"] = "Sve", ["Method"] = "Abs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}),
+ ("SveAbs.template", new Dictionary<string, string> { ["TestName"] = "SveAbs_long", ["Isa"] = "Sve", ["Method"] = "Abs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveAbsoluteDifference.template", new Dictionary<string, string> { ["TestName"] = "SveAbsoluteDifference_float", ["Isa"] = "Sve", ["Method"] = "AbsoluteDifference", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}),
+ ("SveAbsoluteDifference.template", new Dictionary<string, string> { ["TestName"] = "SveAbsoluteDifference_double", ["Isa"] = "Sve", ["Method"] = "AbsoluteDifference", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}),
+ ("SveAbsoluteDifference.template", new Dictionary<string, string> { ["TestName"] = "SveAbsoluteDifference_sbyte", ["Isa"] = "Sve", ["Method"] = "AbsoluteDifference", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}),
+ ("SveAbsoluteDifference.template", new Dictionary<string, string> { ["TestName"] = "SveAbsoluteDifference_short", ["Isa"] = "Sve", ["Method"] = "AbsoluteDifference", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}),
+ ("SveAbsoluteDifference.template", new Dictionary<string, string> { ["TestName"] = "SveAbsoluteDifference_int", ["Isa"] = "Sve", ["Method"] = "AbsoluteDifference", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}),
+ ("SveAbsoluteDifference.template", new Dictionary<string, string> { ["TestName"] = "SveAbsoluteDifference_long", ["Isa"] = "Sve", ["Method"] = "AbsoluteDifference", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveAbsoluteDifference.template", new Dictionary<string, string> { ["TestName"] = "SveAbsoluteDifference_byte", ["Isa"] = "Sve", ["Method"] = "AbsoluteDifference", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}),
+ ("SveAbsoluteDifference.template", new Dictionary<string, string> { ["TestName"] = "SveAbsoluteDifference_ushort", ["Isa"] = "Sve", ["Method"] = "AbsoluteDifference", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}),
+ ("SveAbsoluteDifference.template", new Dictionary<string, string> { ["TestName"] = "SveAbsoluteDifference_uint", ["Isa"] = "Sve", ["Method"] = "AbsoluteDifference", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}),
+ ("SveAbsoluteDifference.template", new Dictionary<string, string> { ["TestName"] = "SveAbsoluteDifference_ulong", ["Isa"] = "Sve", ["Method"] = "AbsoluteDifference", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}),
+ ("SveAdd.template", new Dictionary<string, string> { ["TestName"] = "SveAdd_float", ["Isa"] = "Sve", ["Method"] = "Add", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}),
+ ("SveAdd.template", new Dictionary<string, string> { ["TestName"] = "SveAdd_double", ["Isa"] = "Sve", ["Method"] = "Add", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}),
+ ("SveAdd.template", new Dictionary<string, string> { ["TestName"] = "SveAdd_sbyte", ["Isa"] = "Sve", ["Method"] = "Add", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}),
+ ("SveAdd.template", new Dictionary<string, string> { ["TestName"] = "SveAdd_short", ["Isa"] = "Sve", ["Method"] = "Add", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}),
+ ("SveAdd.template", new Dictionary<string, string> { ["TestName"] = "SveAdd_int", ["Isa"] = "Sve", ["Method"] = "Add", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}),
+ ("SveAdd.template", new Dictionary<string, string> { ["TestName"] = "SveAdd_long", ["Isa"] = "Sve", ["Method"] = "Add", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveAdd.template", new Dictionary<string, string> { ["TestName"] = "SveAdd_byte", ["Isa"] = "Sve", ["Method"] = "Add", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}),
+ ("SveAdd.template", new Dictionary<string, string> { ["TestName"] = "SveAdd_ushort", ["Isa"] = "Sve", ["Method"] = "Add", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}),
+ ("SveAdd.template", new Dictionary<string, string> { ["TestName"] = "SveAdd_uint", ["Isa"] = "Sve", ["Method"] = "Add", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}),
+ ("SveAdd.template", new Dictionary<string, string> { ["TestName"] = "SveAdd_ulong", ["Isa"] = "Sve", ["Method"] = "Add", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}),
+ ("SveAddAcross.template", new Dictionary<string, string> { ["TestName"] = "SveAddAcross_float", ["Isa"] = "Sve", ["Method"] = "AddAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}),
+ ("SveAddAcross.template", new Dictionary<string, string> { ["TestName"] = "SveAddAcross_double", ["Isa"] = "Sve", ["Method"] = "AddAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}),
+ ("SveAddAcross.template", new Dictionary<string, string> { ["TestName"] = "SveAddAcross_long_sbyte", ["Isa"] = "Sve", ["Method"] = "AddAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}),
+ ("SveAddAcross.template", new Dictionary<string, string> { ["TestName"] = "SveAddAcross_long_short", ["Isa"] = "Sve", ["Method"] = "AddAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}),
+ ("SveAddAcross.template", new Dictionary<string, string> { ["TestName"] = "SveAddAcross_long_int", ["Isa"] = "Sve", ["Method"] = "AddAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}),
+ ("SveAddAcross.template", new Dictionary<string, string> { ["TestName"] = "SveAddAcross_long", ["Isa"] = "Sve", ["Method"] = "AddAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveAddAcross.template", new Dictionary<string, string> { ["TestName"] = "SveAddAcross_ulong_byte", ["Isa"] = "Sve", ["Method"] = "AddAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}),
+ ("SveAddAcross.template", new Dictionary<string, string> { ["TestName"] = "SveAddAcross_ulong_ushort", ["Isa"] = "Sve", ["Method"] = "AddAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}),
+ ("SveAddAcross.template", new Dictionary<string, string> { ["TestName"] = "SveAddAcross_ulong_uint", ["Isa"] = "Sve", ["Method"] = "AddAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}),
+ ("SveAddAcross.template", new Dictionary<string, string> { ["TestName"] = "SveAddAcross_ulong", ["Isa"] = "Sve", ["Method"] = "AddAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}),
+ ("SveAddSaturate.template", new Dictionary<string, string> { ["TestName"] = "SveAddSaturate_sbyte", ["Isa"] = "Sve", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}),
+ ("SveAddSaturate.template", new Dictionary<string, string> { ["TestName"] = "SveAddSaturate_short", ["Isa"] = "Sve", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}),
+ ("SveAddSaturate.template", new Dictionary<string, string> { ["TestName"] = "SveAddSaturate_int", ["Isa"] = "Sve", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}),
+ ("SveAddSaturate.template", new Dictionary<string, string> { ["TestName"] = "SveAddSaturate_long", ["Isa"] = "Sve", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveAddSaturate.template", new Dictionary<string, string> { ["TestName"] = "SveAddSaturate_byte", ["Isa"] = "Sve", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}),
+ ("SveAddSaturate.template", new Dictionary<string, string> { ["TestName"] = "SveAddSaturate_ushort", ["Isa"] = "Sve", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}),
+ ("SveAddSaturate.template", new Dictionary<string, string> { ["TestName"] = "SveAddSaturate_uint", ["Isa"] = "Sve", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}),
+ ("SveAddSaturate.template", new Dictionary<string, string> { ["TestName"] = "SveAddSaturate_ulong", ["Isa"] = "Sve", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}),
+ ("SveDivide.template", new Dictionary<string, string> { ["TestName"] = "SveDivide_float", ["Isa"] = "Sve", ["Method"] = "Divide", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}),
+ ("SveDivide.template", new Dictionary<string, string> { ["TestName"] = "SveDivide_double", ["Isa"] = "Sve", ["Method"] = "Divide", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}),
+ ("SveDivide.template", new Dictionary<string, string> { ["TestName"] = "SveDivide_int", ["Isa"] = "Sve", ["Method"] = "Divide", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}),
+ ("SveDivide.template", new Dictionary<string, string> { ["TestName"] = "SveDivide_long", ["Isa"] = "Sve", ["Method"] = "Divide", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}),
+ ("SveDivide.template", new Dictionary<string, string> { ["TestName"] = "SveDivide_uint", ["Isa"] = "Sve", ["Method"] = "Divide", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}),
+ ("SveDivide.template", new Dictionary<string, string> { ["TestName"] = "SveDivide_ulong", ["Isa"] = "Sve", ["Method"] = "Divide", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}),
+ ("SveDotProduct.template", new Dictionary<string, string> { ["TestName"] = "SveDotProduct_int_sbyte", ["Isa"] = "Sve", ["Method"] = "DotProduct", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}),
+ ("SveDotProduct.template", new Dictionary<string, string> { ["TestName"] = "SveDotProduct_long_short", ["Isa"] = "Sve", ["Method"] = "DotProduct", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}),
+ ("SveDotProduct.template", new Dictionary<string, string> { ["TestName"] = "SveDotProduct_uint_byte", ["Isa"] = "Sve", ["Method"] = "DotProduct", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}),
+ ("SveDotProduct.template", new Dictionary<string, string> { ["TestName"] = "SveDotProduct_ulong_ushort", ["Isa"] = "Sve", ["Method"] = "DotProduct", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}),
+ ("SveDotProductBySelectedScalar.template",new Dictionary<string, string> {["TestName"] = "SveDotProductBySelectedScalar_int_sbyte", ["Isa"] = "Sve", ["Method"] = "DotProductBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}),
+ ("SveDotProductBySelectedScalar.template",new Dictionary<string, string> {["TestName"] = "SveDotProductBySelectedScalar_long_short", ["Isa"] = "Sve", ["Method"] = "DotProductBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}),
+ ("SveDotProductBySelectedScalar.template",new Dictionary<string, string> {["TestName"] = "SveDotProductBySelectedScalar_uint_byte", ["Isa"] = "Sve", ["Method"] = "DotProductBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}),
+ ("SveDotProductBySelectedScalar.template",new Dictionary<string, string> {["TestName"] = "SveDotProductBySelectedScalar_ulong_ushort", ["Isa"] = "Sve", ["Method"] = "DotProductBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}),
+ ("SveFusedMultiplyAdd.template", new Dictionary<string, string> { ["TestName"] = "SveFusedMultiplyAdd_float", ["Isa"] = "Sve", ["Method"] = "FusedMultiplyAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}),
+ ("SveFusedMultiplyAdd.template", new Dictionary<string, string> { ["TestName"] = "SveFusedMultiplyAdd_double", ["Isa"] = "Sve", ["Method"] = "FusedMultiplyAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}),
+ ("SveFusedMultiplyAddBySelectedScalar.template",new Dictionary<string, string> {["TestName"] = "SveFusedMultiplyAddBySelectedScalar_float", ["Isa"] = "Sve", ["Method"] = "FusedMultiplyAddBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}),
+ ("SveFusedMultiplyAddBySelectedScalar.template",new Dictionary<string, string> {["TestName"] = "SveFusedMultiplyAddBySelectedScalar_double", ["Isa"] = "Sve", ["Method"] = "FusedMultiplyAddBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}),
+ ("SveFusedMultiplyAddNegated.template",new Dictionary<string, string> {["TestName"] = "SveFusedMultiplyAddNegated_float", ["Isa"] = "Sve", ["Method"] = "FusedMultiplyAddNegated", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector",
["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveFusedMultiplyAddNegated.template",new Dictionary {["TestName"] = "SveFusedMultiplyAddNegated_double", ["Isa"] = "Sve", ["Method"] = "FusedMultiplyAddNegated", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveFusedMultiplySubtract.template", new Dictionary { ["TestName"] = "SveFusedMultiplySubtract_float", ["Isa"] = "Sve", ["Method"] = "FusedMultiplySubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveFusedMultiplySubtract.template", new Dictionary { ["TestName"] = "SveFusedMultiplySubtract_double", ["Isa"] = "Sve", ["Method"] = "FusedMultiplySubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveFusedMultiplySubtractBySelectedScalar.template",new Dictionary {["TestName"] = "SveFusedMultiplySubtractBySelectedScalar_float", ["Isa"] = "Sve", ["Method"] = "FusedMultiplySubtractBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("SveFusedMultiplySubtractBySelectedScalar.template",new Dictionary {["TestName"] = "SveFusedMultiplySubtractBySelectedScalar_double", ["Isa"] = "Sve", ["Method"] = "FusedMultiplySubtractBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("SveFusedMultiplySubtractNegated.template",new Dictionary {["TestName"] = "SveFusedMultiplySubtractNegated_float", ["Isa"] = "Sve", ["Method"] = "FusedMultiplySubtractNegated", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveFusedMultiplySubtractNegated.template",new Dictionary {["TestName"] = "SveFusedMultiplySubtractNegated_double", ["Isa"] = "Sve", ["Method"] = "FusedMultiplySubtractNegated", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveMax.template", new Dictionary { ["TestName"] = "SveMax_float", ["Isa"] = "Sve", ["Method"] = "Max", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = 
"Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveMax.template", new Dictionary { ["TestName"] = "SveMax_double", ["Isa"] = "Sve", ["Method"] = "Max", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveMax.template", new Dictionary { ["TestName"] = "SveMax_sbyte", ["Isa"] = "Sve", ["Method"] = "Max", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveMax.template", new Dictionary { ["TestName"] = "SveMax_short", ["Isa"] = "Sve", ["Method"] = "Max", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveMax.template", new Dictionary { ["TestName"] = "SveMax_int", ["Isa"] = "Sve", ["Method"] = "Max", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveMax.template", new Dictionary { ["TestName"] = "SveMax_long", ["Isa"] = "Sve", ["Method"] = "Max", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveMax.template", new Dictionary { ["TestName"] = "SveMax_byte", ["Isa"] = "Sve", ["Method"] = "Max", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveMax.template", new Dictionary { ["TestName"] = "SveMax_ushort", ["Isa"] = "Sve", ["Method"] = "Max", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveMax.template", new Dictionary { ["TestName"] = "SveMax_uint", ["Isa"] = "Sve", ["Method"] = "Max", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveMax.template", new Dictionary { ["TestName"] = "SveMax_ulong", ["Isa"] = "Sve", ["Method"] = "Max", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveMaxAcross.template", new Dictionary { ["TestName"] = "SveMaxAcross_float", ["Isa"] = "Sve", ["Method"] = "MaxAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveMaxAcross.template", new Dictionary { ["TestName"] = "SveMaxAcross_double", ["Isa"] = "Sve", ["Method"] = "MaxAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveMaxAcross.template", new Dictionary { ["TestName"] = "SveMaxAcross_sbyte", ["Isa"] = "Sve", ["Method"] = 
"MaxAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveMaxAcross.template", new Dictionary { ["TestName"] = "SveMaxAcross_short", ["Isa"] = "Sve", ["Method"] = "MaxAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveMaxAcross.template", new Dictionary { ["TestName"] = "SveMaxAcross_int", ["Isa"] = "Sve", ["Method"] = "MaxAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveMaxAcross.template", new Dictionary { ["TestName"] = "SveMaxAcross_long", ["Isa"] = "Sve", ["Method"] = "MaxAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveMaxAcross.template", new Dictionary { ["TestName"] = "SveMaxAcross_byte", ["Isa"] = "Sve", ["Method"] = "MaxAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveMaxAcross.template", new Dictionary { ["TestName"] = "SveMaxAcross_ushort", ["Isa"] = "Sve", ["Method"] = "MaxAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveMaxAcross.template", new Dictionary { ["TestName"] = "SveMaxAcross_uint", ["Isa"] = "Sve", ["Method"] = "MaxAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveMaxAcross.template", new Dictionary { ["TestName"] = "SveMaxAcross_ulong", ["Isa"] = "Sve", ["Method"] = "MaxAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveMaxNumber.template", new Dictionary { ["TestName"] = "SveMaxNumber_float", ["Isa"] = "Sve", ["Method"] = "MaxNumber", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveMaxNumber.template", new Dictionary { ["TestName"] = "SveMaxNumber_double", ["Isa"] = "Sve", ["Method"] = "MaxNumber", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveMaxNumberAcross.template", new Dictionary { ["TestName"] = "SveMaxNumberAcross_float", ["Isa"] = "Sve", ["Method"] = "MaxNumberAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveMaxNumberAcross.template", new Dictionary { ["TestName"] = "SveMaxNumberAcross_double", ["Isa"] = "Sve", ["Method"] = "MaxNumberAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveMin.template", new Dictionary { ["TestName"] = "SveMin_float", ["Isa"] = "Sve", ["Method"] = "Min", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", 
["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveMin.template", new Dictionary { ["TestName"] = "SveMin_double", ["Isa"] = "Sve", ["Method"] = "Min", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveMin.template", new Dictionary { ["TestName"] = "SveMin_sbyte", ["Isa"] = "Sve", ["Method"] = "Min", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveMin.template", new Dictionary { ["TestName"] = "SveMin_short", ["Isa"] = "Sve", ["Method"] = "Min", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveMin.template", new Dictionary { ["TestName"] = "SveMin_int", ["Isa"] = "Sve", ["Method"] = "Min", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveMin.template", new Dictionary { ["TestName"] = "SveMin_long", ["Isa"] = "Sve", ["Method"] = "Min", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveMin.template", new Dictionary { ["TestName"] = "SveMin_byte", ["Isa"] = "Sve", ["Method"] = "Min", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveMin.template", new Dictionary { ["TestName"] = "SveMin_ushort", ["Isa"] = "Sve", ["Method"] = "Min", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveMin.template", new Dictionary { ["TestName"] = "SveMin_uint", ["Isa"] = "Sve", ["Method"] = "Min", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveMin.template", new Dictionary { ["TestName"] = "SveMin_ulong", ["Isa"] = "Sve", ["Method"] = "Min", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveMinAcross.template", new Dictionary { ["TestName"] = "SveMinAcross_float", ["Isa"] = "Sve", ["Method"] = "MinAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveMinAcross.template", new Dictionary { ["TestName"] = "SveMinAcross_double", ["Isa"] = "Sve", ["Method"] = "MinAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveMinAcross.template", new Dictionary { ["TestName"] = "SveMinAcross_sbyte", ["Isa"] = "Sve", ["Method"] = "MinAcross", ["RetVectorType"] = 
"Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveMinAcross.template", new Dictionary { ["TestName"] = "SveMinAcross_short", ["Isa"] = "Sve", ["Method"] = "MinAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveMinAcross.template", new Dictionary { ["TestName"] = "SveMinAcross_int", ["Isa"] = "Sve", ["Method"] = "MinAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveMinAcross.template", new Dictionary { ["TestName"] = "SveMinAcross_long", ["Isa"] = "Sve", ["Method"] = "MinAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveMinAcross.template", new Dictionary { ["TestName"] = "SveMinAcross_byte", ["Isa"] = "Sve", ["Method"] = "MinAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveMinAcross.template", new Dictionary { ["TestName"] = "SveMinAcross_ushort", ["Isa"] = "Sve", ["Method"] = "MinAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveMinAcross.template", new Dictionary { ["TestName"] = "SveMinAcross_uint", ["Isa"] = "Sve", ["Method"] = "MinAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveMinAcross.template", new Dictionary { ["TestName"] = "SveMinAcross_ulong", ["Isa"] = "Sve", ["Method"] = "MinAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveMinNumber.template", new Dictionary { ["TestName"] = "SveMinNumber_float", ["Isa"] = "Sve", ["Method"] = "MinNumber", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveMinNumber.template", new Dictionary { ["TestName"] = "SveMinNumber_double", ["Isa"] = "Sve", ["Method"] = "MinNumber", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveMinNumberAcross.template", new Dictionary { ["TestName"] = "SveMinNumberAcross_float", ["Isa"] = "Sve", ["Method"] = "MinNumberAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveMinNumberAcross.template", new Dictionary { ["TestName"] = "SveMinNumberAcross_double", ["Isa"] = "Sve", ["Method"] = "MinNumberAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveMultiply.template", new Dictionary { ["TestName"] = "SveMultiply_float", ["Isa"] = "Sve", ["Method"] = "Multiply", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = 
"Single", ["LargestVectorSize"] = "8",}), + ("SveMultiply.template", new Dictionary { ["TestName"] = "SveMultiply_double", ["Isa"] = "Sve", ["Method"] = "Multiply", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveMultiply.template", new Dictionary { ["TestName"] = "SveMultiply_sbyte", ["Isa"] = "Sve", ["Method"] = "Multiply", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveMultiply.template", new Dictionary { ["TestName"] = "SveMultiply_short", ["Isa"] = "Sve", ["Method"] = "Multiply", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveMultiply.template", new Dictionary { ["TestName"] = "SveMultiply_int", ["Isa"] = "Sve", ["Method"] = "Multiply", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveMultiply.template", new Dictionary { ["TestName"] = "SveMultiply_long", ["Isa"] = "Sve", ["Method"] = "Multiply", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveMultiply.template", new Dictionary { ["TestName"] = "SveMultiply_byte", ["Isa"] = "Sve", ["Method"] = "Multiply", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveMultiply.template", new Dictionary { ["TestName"] = "SveMultiply_ushort", ["Isa"] = "Sve", ["Method"] = "Multiply", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveMultiply.template", new Dictionary { ["TestName"] = "SveMultiply_uint", ["Isa"] = "Sve", ["Method"] = "Multiply", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveMultiply.template", new Dictionary { ["TestName"] = "SveMultiply_ulong", ["Isa"] = "Sve", ["Method"] = "Multiply", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveMultiplyAdd.template", new Dictionary { ["TestName"] = "SveMultiplyAdd_sbyte", ["Isa"] = "Sve", ["Method"] = "MultiplyAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveMultiplyAdd.template", new Dictionary { ["TestName"] = "SveMultiplyAdd_short", ["Isa"] = "Sve", ["Method"] = "MultiplyAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveMultiplyAdd.template", new Dictionary { ["TestName"] = "SveMultiplyAdd_int", ["Isa"] = "Sve", ["Method"] = "MultiplyAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveMultiplyAdd.template", new Dictionary { ["TestName"] = "SveMultiplyAdd_long", ["Isa"] = "Sve", ["Method"] = "MultiplyAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveMultiplyAdd.template", new Dictionary { ["TestName"] = "SveMultiplyAdd_byte", ["Isa"] = "Sve", ["Method"] = "MultiplyAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveMultiplyAdd.template", new Dictionary { ["TestName"] = "SveMultiplyAdd_ushort", ["Isa"] = "Sve", ["Method"] = "MultiplyAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveMultiplyAdd.template", new Dictionary { ["TestName"] = "SveMultiplyAdd_uint", ["Isa"] = "Sve", ["Method"] = "MultiplyAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveMultiplyAdd.template", new Dictionary { ["TestName"] = "SveMultiplyAdd_ulong", ["Isa"] = "Sve", ["Method"] = "MultiplyAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveMultiplyBySelectedScalar.template",new Dictionary {["TestName"] = "SveMultiplyBySelectedScalar_float", ["Isa"] = "Sve", ["Method"] = "MultiplyBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveMultiplyBySelectedScalar.template",new Dictionary {["TestName"] = "SveMultiplyBySelectedScalar_double", ["Isa"] = "Sve", ["Method"] = "MultiplyBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveMultiplyExtended.template", new Dictionary { ["TestName"] = "SveMultiplyExtended_float", ["Isa"] = "Sve", ["Method"] = "MultiplyExtended", ["RetVectorType"] = "Vector", ["RetBaseType"] = 
"Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveMultiplyExtended.template", new Dictionary { ["TestName"] = "SveMultiplyExtended_double", ["Isa"] = "Sve", ["Method"] = "MultiplyExtended", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveMultiplySubtract.template", new Dictionary { ["TestName"] = "SveMultiplySubtract_sbyte", ["Isa"] = "Sve", ["Method"] = "MultiplySubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveMultiplySubtract.template", new Dictionary { ["TestName"] = "SveMultiplySubtract_short", ["Isa"] = "Sve", ["Method"] = "MultiplySubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveMultiplySubtract.template", new Dictionary { ["TestName"] = "SveMultiplySubtract_int", ["Isa"] = "Sve", ["Method"] = "MultiplySubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveMultiplySubtract.template", new Dictionary { ["TestName"] = "SveMultiplySubtract_long", ["Isa"] = "Sve", ["Method"] = "MultiplySubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveMultiplySubtract.template", new Dictionary { ["TestName"] = "SveMultiplySubtract_byte", ["Isa"] = "Sve", ["Method"] = "MultiplySubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveMultiplySubtract.template", new Dictionary { ["TestName"] = "SveMultiplySubtract_ushort", ["Isa"] = "Sve", ["Method"] = "MultiplySubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveMultiplySubtract.template", new Dictionary { ["TestName"] = "SveMultiplySubtract_uint", ["Isa"] = "Sve", ["Method"] = "MultiplySubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveMultiplySubtract.template", new Dictionary { ["TestName"] = "SveMultiplySubtract_ulong", ["Isa"] = "Sve", ["Method"] = "MultiplySubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = 
"UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveNegate.template", new Dictionary { ["TestName"] = "SveNegate_float", ["Isa"] = "Sve", ["Method"] = "Negate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveNegate.template", new Dictionary { ["TestName"] = "SveNegate_double", ["Isa"] = "Sve", ["Method"] = "Negate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveNegate.template", new Dictionary { ["TestName"] = "SveNegate_sbyte", ["Isa"] = "Sve", ["Method"] = "Negate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveNegate.template", new Dictionary { ["TestName"] = "SveNegate_short", ["Isa"] = "Sve", ["Method"] = "Negate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveNegate.template", new Dictionary { ["TestName"] = "SveNegate_int", ["Isa"] = "Sve", ["Method"] = "Negate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveNegate.template", new Dictionary { ["TestName"] = "SveNegate_long", ["Isa"] = "Sve", ["Method"] = "Negate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveSignExtend16.template", new Dictionary { ["TestName"] = "SveSignExtend16_int", ["Isa"] = "Sve", ["Method"] = "SignExtend16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveSignExtend16.template", new Dictionary { ["TestName"] = "SveSignExtend16_long", ["Isa"] = "Sve", ["Method"] = "SignExtend16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveSignExtend32.template", new Dictionary { ["TestName"] = "SveSignExtend32_long", ["Isa"] = "Sve", ["Method"] = "SignExtend32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveSignExtend8.template", new Dictionary { ["TestName"] = "SveSignExtend8_short", ["Isa"] = "Sve", ["Method"] = "SignExtend8", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveSignExtend8.template", new Dictionary { ["TestName"] = "SveSignExtend8_int", ["Isa"] = "Sve", ["Method"] = "SignExtend8", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveSignExtend8.template", new Dictionary { ["TestName"] = "SveSignExtend8_long", ["Isa"] = "Sve", ["Method"] = "SignExtend8", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveSignExtendWideningLower.template",new Dictionary {["TestName"] = 
"SveSignExtendWideningLower_short_sbyte", ["Isa"] = "Sve", ["Method"] = "SignExtendWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveSignExtendWideningLower.template",new Dictionary {["TestName"] = "SveSignExtendWideningLower_int_short", ["Isa"] = "Sve", ["Method"] = "SignExtendWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveSignExtendWideningLower.template",new Dictionary {["TestName"] = "SveSignExtendWideningLower_long_int", ["Isa"] = "Sve", ["Method"] = "SignExtendWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveSignExtendWideningUpper.template",new Dictionary {["TestName"] = "SveSignExtendWideningUpper_short_sbyte", ["Isa"] = "Sve", ["Method"] = "SignExtendWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveSignExtendWideningUpper.template",new Dictionary {["TestName"] = "SveSignExtendWideningUpper_int_short", ["Isa"] = "Sve", ["Method"] = "SignExtendWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveSignExtendWideningUpper.template",new Dictionary {["TestName"] = "SveSignExtendWideningUpper_long_int", ["Isa"] = "Sve", ["Method"] = "SignExtendWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveSubtract.template", new Dictionary { ["TestName"] = "SveSubtract_float", ["Isa"] = "Sve", ["Method"] = "Subtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveSubtract.template", new Dictionary { ["TestName"] = "SveSubtract_double", ["Isa"] = "Sve", ["Method"] = "Subtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveSubtract.template", new Dictionary { ["TestName"] = "SveSubtract_sbyte", ["Isa"] = "Sve", ["Method"] = "Subtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveSubtract.template", new Dictionary { ["TestName"] = "SveSubtract_short", ["Isa"] = "Sve", ["Method"] = "Subtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveSubtract.template", new Dictionary { ["TestName"] = "SveSubtract_int", ["Isa"] = "Sve", ["Method"] = "Subtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveSubtract.template", new Dictionary { ["TestName"] = "SveSubtract_long", ["Isa"] = "Sve", 
["Method"] = "Subtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveSubtract.template", new Dictionary { ["TestName"] = "SveSubtract_byte", ["Isa"] = "Sve", ["Method"] = "Subtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveSubtract.template", new Dictionary { ["TestName"] = "SveSubtract_ushort", ["Isa"] = "Sve", ["Method"] = "Subtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveSubtract.template", new Dictionary { ["TestName"] = "SveSubtract_uint", ["Isa"] = "Sve", ["Method"] = "Subtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSubtract.template", new Dictionary { ["TestName"] = "SveSubtract_ulong", ["Isa"] = "Sve", ["Method"] = "Subtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveSubtractSaturate.template", new Dictionary { ["TestName"] = "SveSubtractSaturate_sbyte", ["Isa"] = "Sve", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveSubtractSaturate.template", new Dictionary { ["TestName"] = "SveSubtractSaturate_short", ["Isa"] = "Sve", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveSubtractSaturate.template", new Dictionary { ["TestName"] = "SveSubtractSaturate_int", ["Isa"] = "Sve", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveSubtractSaturate.template", new Dictionary { ["TestName"] = "SveSubtractSaturate_long", ["Isa"] = "Sve", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveSubtractSaturate.template", new Dictionary { ["TestName"] = "SveSubtractSaturate_byte", ["Isa"] = "Sve", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveSubtractSaturate.template", new Dictionary { ["TestName"] = "SveSubtractSaturate_ushort", ["Isa"] = "Sve", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = 
"Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveSubtractSaturate.template", new Dictionary { ["TestName"] = "SveSubtractSaturate_uint", ["Isa"] = "Sve", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSubtractSaturate.template", new Dictionary { ["TestName"] = "SveSubtractSaturate_ulong", ["Isa"] = "Sve", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveZeroExtend16.template", new Dictionary { ["TestName"] = "SveZeroExtend16_uint", ["Isa"] = "Sve", ["Method"] = "ZeroExtend16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveZeroExtend16.template", new Dictionary { ["TestName"] = "SveZeroExtend16_ulong", ["Isa"] = "Sve", ["Method"] = "ZeroExtend16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveZeroExtend32.template", new Dictionary { ["TestName"] = "SveZeroExtend32_ulong", ["Isa"] = "Sve", ["Method"] = "ZeroExtend32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveZeroExtend8.template", new Dictionary { ["TestName"] = "SveZeroExtend8_ushort", ["Isa"] = "Sve", ["Method"] = "ZeroExtend8", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveZeroExtend8.template", new Dictionary { ["TestName"] = "SveZeroExtend8_uint", ["Isa"] = "Sve", ["Method"] = "ZeroExtend8", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveZeroExtend8.template", new Dictionary { ["TestName"] = "SveZeroExtend8_ulong", ["Isa"] = "Sve", ["Method"] = "ZeroExtend8", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveZeroExtendWideningLower.template",new Dictionary {["TestName"] = "SveZeroExtendWideningLower_ushort_byte", ["Isa"] = "Sve", ["Method"] = "ZeroExtendWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveZeroExtendWideningLower.template",new Dictionary {["TestName"] = "SveZeroExtendWideningLower_uint_ushort", ["Isa"] = "Sve", ["Method"] = "ZeroExtendWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveZeroExtendWideningLower.template",new Dictionary {["TestName"] = "SveZeroExtendWideningLower_ulong_uint", ["Isa"] = "Sve", ["Method"] = "ZeroExtendWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveZeroExtendWideningUpper.template",new Dictionary {["TestName"] = "SveZeroExtendWideningUpper_ushort_byte", ["Isa"] = "Sve", ["Method"] = 
"ZeroExtendWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveZeroExtendWideningUpper.template",new Dictionary {["TestName"] = "SveZeroExtendWideningUpper_uint_ushort", ["Isa"] = "Sve", ["Method"] = "ZeroExtendWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveZeroExtendWideningUpper.template",new Dictionary {["TestName"] = "SveZeroExtendWideningUpper_ulong_uint", ["Isa"] = "Sve", ["Method"] = "ZeroExtendWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + + +// Sve mask + ("SveAbsoluteCompareGreaterThan.template",new Dictionary {["TestName"] = "SveAbsoluteCompareGreaterThan_float", ["Isa"] = "Sve", ["Method"] = "AbsoluteCompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveAbsoluteCompareGreaterThan.template",new Dictionary {["TestName"] = "SveAbsoluteCompareGreaterThan_double", ["Isa"] = "Sve", ["Method"] = "AbsoluteCompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveAbsoluteCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveAbsoluteCompareGreaterThanOrEqual_float", ["Isa"] = "Sve", ["Method"] = "AbsoluteCompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveAbsoluteCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveAbsoluteCompareGreaterThanOrEqual_double", ["Isa"] = "Sve", ["Method"] = "AbsoluteCompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveAbsoluteCompareLessThan.template",new Dictionary {["TestName"] = "SveAbsoluteCompareLessThan_float", ["Isa"] = "Sve", ["Method"] = "AbsoluteCompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveAbsoluteCompareLessThan.template",new Dictionary {["TestName"] = "SveAbsoluteCompareLessThan_double", ["Isa"] = "Sve", ["Method"] = "AbsoluteCompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveAbsoluteCompareLessThanOrEqual.template",new Dictionary {["TestName"] = "SveAbsoluteCompareLessThanOrEqual_float", ["Isa"] = "Sve", ["Method"] = "AbsoluteCompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + 
("SveAbsoluteCompareLessThanOrEqual.template",new Dictionary {["TestName"] = "SveAbsoluteCompareLessThanOrEqual_double", ["Isa"] = "Sve", ["Method"] = "AbsoluteCompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveCompact.template", new Dictionary { ["TestName"] = "SveCompact_float", ["Isa"] = "Sve", ["Method"] = "Compact", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveCompact.template", new Dictionary { ["TestName"] = "SveCompact_double", ["Isa"] = "Sve", ["Method"] = "Compact", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveCompact.template", new Dictionary { ["TestName"] = "SveCompact_int", ["Isa"] = "Sve", ["Method"] = "Compact", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCompact.template", new Dictionary { ["TestName"] = "SveCompact_long", ["Isa"] = "Sve", ["Method"] = "Compact", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompact.template", new Dictionary { ["TestName"] = "SveCompact_uint", ["Isa"] = "Sve", ["Method"] = "Compact", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCompact.template", new Dictionary { ["TestName"] = "SveCompact_ulong", ["Isa"] = "Sve", ["Method"] = "Compact", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareEqual.template", new Dictionary { ["TestName"] = "SveCompareEqual_float", ["Isa"] = "Sve", ["Method"] = "CompareEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveCompareEqual.template", new Dictionary { ["TestName"] = "SveCompareEqual_double", ["Isa"] = "Sve", ["Method"] = "CompareEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveCompareEqual.template", new Dictionary { ["TestName"] = "SveCompareEqual_sbyte", ["Isa"] = "Sve", ["Method"] = "CompareEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveCompareEqual.template", new Dictionary { ["TestName"] = "SveCompareEqual_short", ["Isa"] = "Sve", ["Method"] = "CompareEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveCompareEqual.template", new Dictionary { ["TestName"] = "SveCompareEqual_int", ["Isa"] = "Sve", ["Method"] = "CompareEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCompareEqual.template", new Dictionary { ["TestName"] = "SveCompareEqual_long", ["Isa"] = "Sve", ["Method"] = "CompareEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareEqual.template", new Dictionary { ["TestName"] = "SveCompareEqual_byte", ["Isa"] = "Sve", ["Method"] = "CompareEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveCompareEqual.template", new Dictionary { ["TestName"] = "SveCompareEqual_ushort", ["Isa"] = "Sve", ["Method"] = "CompareEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveCompareEqual.template", new Dictionary { ["TestName"] = "SveCompareEqual_uint", ["Isa"] = "Sve", ["Method"] = "CompareEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCompareEqual.template", new Dictionary { ["TestName"] = "SveCompareEqual_ulong", ["Isa"] = "Sve", ["Method"] = "CompareEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareEqual.template", new Dictionary { ["TestName"] = "SveCompareEqual_sbyte_long", ["Isa"] = "Sve", ["Method"] = "CompareEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareEqual.template", new Dictionary { ["TestName"] = "SveCompareEqual_short_long", ["Isa"] = "Sve", ["Method"] = "CompareEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareEqual.template", new Dictionary { ["TestName"] = "SveCompareEqual_int_long", ["Isa"] = "Sve", ["Method"] = "CompareEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_float", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + 
("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_double", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_sbyte", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_short", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_int", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_long", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_byte", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_ushort", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_uint", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_ulong", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_sbyte_long", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = 
"8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_short_long", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_int_long", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_byte_ulong", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_ushort_ulong", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThan.template", new Dictionary { ["TestName"] = "SveCompareGreaterThan_uint_ulong", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_float", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_double", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_sbyte", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_short", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_int", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_long", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_byte", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_ushort", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_uint", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_ulong", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_sbyte_long", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_short_long", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_int_long", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_byte_ulong", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + 
("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_ushort_ulong", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveCompareGreaterThanOrEqual_uint_ulong", ["Isa"] = "Sve", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_float", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_double", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_sbyte", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_short", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_int", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_long", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_byte", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_ushort", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + 
("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_uint", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_ulong", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_sbyte_long", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_short_long", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_int_long", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_byte_ulong", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_ushort_ulong", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThan.template", new Dictionary { ["TestName"] = "SveCompareLessThan_uint_ulong", ["Isa"] = "Sve", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_float", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_double", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + 
("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_sbyte", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_short", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_int", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_long", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_byte", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_ushort", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_uint", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_ulong", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_sbyte_long", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_short_long", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", 
["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_int_long", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_byte_ulong", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_ushort_ulong", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareLessThanOrEqual.template",new Dictionary { ["TestName"] = "SveCompareLessThanOrEqual_uint_ulong", ["Isa"] = "Sve", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareNotEqualTo.template", new Dictionary { ["TestName"] = "SveCompareNotEqualTo_float", ["Isa"] = "Sve", ["Method"] = "CompareNotEqualTo", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveCompareNotEqualTo.template", new Dictionary { ["TestName"] = "SveCompareNotEqualTo_double", ["Isa"] = "Sve", ["Method"] = "CompareNotEqualTo", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveCompareNotEqualTo.template", new Dictionary { ["TestName"] = "SveCompareNotEqualTo_sbyte", ["Isa"] = "Sve", ["Method"] = "CompareNotEqualTo", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveCompareNotEqualTo.template", new Dictionary { ["TestName"] = "SveCompareNotEqualTo_short", ["Isa"] = "Sve", ["Method"] = "CompareNotEqualTo", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveCompareNotEqualTo.template", new Dictionary { ["TestName"] = "SveCompareNotEqualTo_int", ["Isa"] = "Sve", ["Method"] = "CompareNotEqualTo", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCompareNotEqualTo.template", new Dictionary { ["TestName"] = "SveCompareNotEqualTo_long", ["Isa"] = "Sve", ["Method"] = "CompareNotEqualTo", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareNotEqualTo.template", new Dictionary { ["TestName"] = "SveCompareNotEqualTo_byte", ["Isa"] = "Sve", ["Method"] = "CompareNotEqualTo", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveCompareNotEqualTo.template", new Dictionary { ["TestName"] = "SveCompareNotEqualTo_ushort", ["Isa"] = "Sve", ["Method"] = "CompareNotEqualTo", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveCompareNotEqualTo.template", new Dictionary { ["TestName"] = "SveCompareNotEqualTo_uint", ["Isa"] = "Sve", ["Method"] = "CompareNotEqualTo", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCompareNotEqualTo.template", new Dictionary { ["TestName"] = "SveCompareNotEqualTo_ulong", ["Isa"] = "Sve", ["Method"] = "CompareNotEqualTo", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompareNotEqualTo.template", new Dictionary { ["TestName"] = "SveCompareNotEqualTo_sbyte_long", ["Isa"] = "Sve", ["Method"] = "CompareNotEqualTo", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareNotEqualTo.template", new Dictionary { ["TestName"] = "SveCompareNotEqualTo_short_long", ["Isa"] = "Sve", ["Method"] = "CompareNotEqualTo", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareNotEqualTo.template", new Dictionary { ["TestName"] = "SveCompareNotEqualTo_int_long", ["Isa"] = "Sve", ["Method"] = "CompareNotEqualTo", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompareUnordered.template", new Dictionary { ["TestName"] = "SveCompareUnordered_float", ["Isa"] = "Sve", ["Method"] = "CompareUnordered", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveCompareUnordered.template", new Dictionary { ["TestName"] = "SveCompareUnordered_double", ["Isa"] = "Sve", ["Method"] = "CompareUnordered", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_float", ["Isa"] = "Sve", ["Method"] = 
"ConditionalExtractAfterLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_float", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_double", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_double", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_sbyte", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_sbyte", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_short", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_short", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_int", 
["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_int", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_long", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_long", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_byte", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_byte", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_ushort", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_ushort", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = 
"SveConditionalExtractAfterLastActiveElement_uint", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_uint", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_ulong", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElement_ulong", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElementAndReplicate_float", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElementAndReplicate_double", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElementAndReplicate_sbyte", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElementAndReplicate_short", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = 
"Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElementAndReplicate_int", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElementAndReplicate_long", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElementAndReplicate_byte", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElementAndReplicate_ushort", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElementAndReplicate_uint", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractAfterLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractAfterLastActiveElementAndReplicate_ulong", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractAfterLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_float", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_float", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_double", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_double", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_sbyte", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_sbyte", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_short", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_short", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_int", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", 
["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_int", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_long", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_long", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_byte", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_byte", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_ushort", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_ushort", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_uint", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = 
"8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_uint", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_ulong", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElement_ulong", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElement", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElementAndReplicate_float", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElementAndReplicate_double", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElementAndReplicate_sbyte", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElementAndReplicate_short", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElementAndReplicate_int", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElementAndReplicate_long", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElementAndReplicate_byte", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElementAndReplicate_ushort", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElementAndReplicate_uint", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveConditionalExtractLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveConditionalExtractLastActiveElementAndReplicate_ulong", ["Isa"] = "Sve", ["Method"] = "ConditionalExtractLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveConditionalSelect.template", new Dictionary { ["TestName"] = "SveConditionalSelect_float", ["Isa"] = "Sve", ["Method"] = "ConditionalSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveConditionalSelect.template", new Dictionary { ["TestName"] = "SveConditionalSelect_double", ["Isa"] = "Sve", ["Method"] = "ConditionalSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + 
("SveConditionalSelect.template", new Dictionary { ["TestName"] = "SveConditionalSelect_sbyte", ["Isa"] = "Sve", ["Method"] = "ConditionalSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveConditionalSelect.template", new Dictionary { ["TestName"] = "SveConditionalSelect_short", ["Isa"] = "Sve", ["Method"] = "ConditionalSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveConditionalSelect.template", new Dictionary { ["TestName"] = "SveConditionalSelect_int", ["Isa"] = "Sve", ["Method"] = "ConditionalSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveConditionalSelect.template", new Dictionary { ["TestName"] = "SveConditionalSelect_long", ["Isa"] = "Sve", ["Method"] = "ConditionalSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveConditionalSelect.template", new Dictionary { ["TestName"] = "SveConditionalSelect_byte", ["Isa"] = "Sve", ["Method"] = "ConditionalSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveConditionalSelect.template", new Dictionary { ["TestName"] = "SveConditionalSelect_ushort", ["Isa"] = "Sve", ["Method"] = "ConditionalSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveConditionalSelect.template", new Dictionary { ["TestName"] = "SveConditionalSelect_uint", ["Isa"] = "Sve", ["Method"] = "ConditionalSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveConditionalSelect.template", new Dictionary { ["TestName"] = "SveConditionalSelect_ulong", ["Isa"] = "Sve", ["Method"] = "ConditionalSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterMask.template", new Dictionary { ["TestName"] = "SveCreateBreakAfterMask_sbyte", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", 
["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterMask.template", new Dictionary { ["TestName"] = "SveCreateBreakAfterMask_short", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterMask.template", new Dictionary { ["TestName"] = "SveCreateBreakAfterMask_int", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterMask.template", new Dictionary { ["TestName"] = "SveCreateBreakAfterMask_long", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterMask.template", new Dictionary { ["TestName"] = "SveCreateBreakAfterMask_byte", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterMask.template", new Dictionary { ["TestName"] = "SveCreateBreakAfterMask_ushort", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterMask.template", new Dictionary { ["TestName"] = "SveCreateBreakAfterMask_uint", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterMask.template", new Dictionary { ["TestName"] = "SveCreateBreakAfterMask_ulong", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterPropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakAfterPropagateMask_sbyte", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterPropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakAfterPropagateMask_short", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterPropagateMask.template",new 
Dictionary {["TestName"] = "SveCreateBreakAfterPropagateMask_int", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterPropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakAfterPropagateMask_long", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterPropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakAfterPropagateMask_byte", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterPropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakAfterPropagateMask_ushort", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterPropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakAfterPropagateMask_uint", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakAfterPropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakAfterPropagateMask_ulong", ["Isa"] = "Sve", ["Method"] = "CreateBreakAfterPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforeMask.template", new Dictionary { ["TestName"] = "SveCreateBreakBeforeMask_sbyte", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforeMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforeMask.template", new Dictionary { ["TestName"] = "SveCreateBreakBeforeMask_short", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforeMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforeMask.template", new Dictionary { ["TestName"] = "SveCreateBreakBeforeMask_int", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforeMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforeMask.template", new Dictionary { ["TestName"] = "SveCreateBreakBeforeMask_long", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforeMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforeMask.template", new Dictionary { ["TestName"] = "SveCreateBreakBeforeMask_byte", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforeMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforeMask.template", new Dictionary { ["TestName"] = "SveCreateBreakBeforeMask_ushort", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforeMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforeMask.template", new Dictionary { ["TestName"] = "SveCreateBreakBeforeMask_uint", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforeMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforeMask.template", new Dictionary { ["TestName"] = "SveCreateBreakBeforeMask_ulong", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforeMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforePropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakBeforePropagateMask_sbyte", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforePropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforePropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakBeforePropagateMask_short", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforePropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforePropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakBeforePropagateMask_int", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforePropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforePropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakBeforePropagateMask_long", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforePropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforePropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakBeforePropagateMask_byte", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforePropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforePropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakBeforePropagateMask_ushort", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforePropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforePropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakBeforePropagateMask_uint", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforePropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakBeforePropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakBeforePropagateMask_ulong", ["Isa"] = "Sve", ["Method"] = "CreateBreakBeforePropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakPropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakPropagateMask_sbyte", ["Isa"] = "Sve", ["Method"] = "CreateBreakPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakPropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakPropagateMask_short", ["Isa"] = "Sve", ["Method"] = "CreateBreakPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakPropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakPropagateMask_int", ["Isa"] = "Sve", ["Method"] = "CreateBreakPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakPropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakPropagateMask_long", ["Isa"] = "Sve", ["Method"] = "CreateBreakPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakPropagateMask.template",new Dictionary {["TestName"] = 
"SveCreateBreakPropagateMask_byte", ["Isa"] = "Sve", ["Method"] = "CreateBreakPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakPropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakPropagateMask_ushort", ["Isa"] = "Sve", ["Method"] = "CreateBreakPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakPropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakPropagateMask_uint", ["Isa"] = "Sve", ["Method"] = "CreateBreakPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateBreakPropagateMask.template",new Dictionary {["TestName"] = "SveCreateBreakPropagateMask_ulong", ["Isa"] = "Sve", ["Method"] = "CreateBreakPropagateMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateFalseMaskByte.template", new Dictionary { ["TestName"] = "SveCreateFalseMaskByte_byte", ["Isa"] = "Sve", ["Method"] = "CreateFalseMaskByte", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveCreateFalseMaskDouble.template", new Dictionary { ["TestName"] = "SveCreateFalseMaskDouble_double", ["Isa"] = "Sve", ["Method"] = "CreateFalseMaskDouble", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveCreateFalseMaskInt16.template", new Dictionary { ["TestName"] = "SveCreateFalseMaskInt16_short", ["Isa"] = "Sve", ["Method"] = "CreateFalseMaskInt16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveCreateFalseMaskInt32.template", new Dictionary { ["TestName"] = "SveCreateFalseMaskInt32_int", ["Isa"] = "Sve", ["Method"] = "CreateFalseMaskInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateFalseMaskInt64.template", new Dictionary { ["TestName"] = "SveCreateFalseMaskInt64_long", ["Isa"] = "Sve", ["Method"] = "CreateFalseMaskInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateFalseMaskSByte.template", new Dictionary { ["TestName"] = "SveCreateFalseMaskSByte_sbyte", ["Isa"] = "Sve", ["Method"] = "CreateFalseMaskSByte", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveCreateFalseMaskSingle.template", new Dictionary { ["TestName"] = "SveCreateFalseMaskSingle_float", ["Isa"] = "Sve", ["Method"] = "CreateFalseMaskSingle", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveCreateFalseMaskUInt16.template", new Dictionary { ["TestName"] = "SveCreateFalseMaskUInt16_ushort", ["Isa"] = "Sve", ["Method"] = "CreateFalseMaskUInt16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveCreateFalseMaskUInt32.template", new Dictionary { ["TestName"] = "SveCreateFalseMaskUInt32_uint", ["Isa"] = "Sve", ["Method"] = 
"CreateFalseMaskUInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateFalseMaskUInt64.template", new Dictionary { ["TestName"] = "SveCreateFalseMaskUInt64_ulong", ["Isa"] = "Sve", ["Method"] = "CreateFalseMaskUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateMaskForFirstActiveElement.template",new Dictionary {["TestName"] = "SveCreateMaskForFirstActiveElement_sbyte", ["Isa"] = "Sve", ["Method"] = "CreateMaskForFirstActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveCreateMaskForFirstActiveElement.template",new Dictionary {["TestName"] = "SveCreateMaskForFirstActiveElement_short", ["Isa"] = "Sve", ["Method"] = "CreateMaskForFirstActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveCreateMaskForFirstActiveElement.template",new Dictionary {["TestName"] = "SveCreateMaskForFirstActiveElement_int", ["Isa"] = "Sve", ["Method"] = "CreateMaskForFirstActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateMaskForFirstActiveElement.template",new Dictionary {["TestName"] = "SveCreateMaskForFirstActiveElement_long", ["Isa"] = "Sve", ["Method"] = "CreateMaskForFirstActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateMaskForFirstActiveElement.template",new Dictionary {["TestName"] = "SveCreateMaskForFirstActiveElement_byte", ["Isa"] = "Sve", ["Method"] = "CreateMaskForFirstActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveCreateMaskForFirstActiveElement.template",new Dictionary {["TestName"] = "SveCreateMaskForFirstActiveElement_ushort", ["Isa"] = "Sve", ["Method"] = "CreateMaskForFirstActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveCreateMaskForFirstActiveElement.template",new Dictionary {["TestName"] = "SveCreateMaskForFirstActiveElement_uint", ["Isa"] = "Sve", ["Method"] = "CreateMaskForFirstActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateMaskForFirstActiveElement.template",new Dictionary {["TestName"] = "SveCreateMaskForFirstActiveElement_ulong", ["Isa"] = "Sve", ["Method"] = "CreateMaskForFirstActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + 
("SveCreateMaskForNextActiveElement.template",new Dictionary {["TestName"] = "SveCreateMaskForNextActiveElement_byte", ["Isa"] = "Sve", ["Method"] = "CreateMaskForNextActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveCreateMaskForNextActiveElement.template",new Dictionary {["TestName"] = "SveCreateMaskForNextActiveElement_ushort", ["Isa"] = "Sve", ["Method"] = "CreateMaskForNextActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveCreateMaskForNextActiveElement.template",new Dictionary {["TestName"] = "SveCreateMaskForNextActiveElement_uint", ["Isa"] = "Sve", ["Method"] = "CreateMaskForNextActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateMaskForNextActiveElement.template",new Dictionary {["TestName"] = "SveCreateMaskForNextActiveElement_ulong", ["Isa"] = "Sve", ["Method"] = "CreateMaskForNextActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateTrueMaskByte.template", new Dictionary { ["TestName"] = "SveCreateTrueMaskByte_byte", ["Isa"] = "Sve", ["Method"] = "CreateTrueMaskByte", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveCreateTrueMaskDouble.template", new Dictionary { ["TestName"] = "SveCreateTrueMaskDouble_double", ["Isa"] = "Sve", ["Method"] = "CreateTrueMaskDouble", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveCreateTrueMaskInt16.template", new Dictionary { ["TestName"] = "SveCreateTrueMaskInt16_short", ["Isa"] = "Sve", ["Method"] = "CreateTrueMaskInt16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveCreateTrueMaskInt32.template", new Dictionary { ["TestName"] = "SveCreateTrueMaskInt32_int", ["Isa"] = "Sve", ["Method"] = "CreateTrueMaskInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveCreateTrueMaskInt64.template", new Dictionary { ["TestName"] = "SveCreateTrueMaskInt64_long", ["Isa"] = "Sve", ["Method"] = "CreateTrueMaskInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveCreateTrueMaskSByte.template", new Dictionary { ["TestName"] = "SveCreateTrueMaskSByte_sbyte", ["Isa"] = "Sve", ["Method"] = "CreateTrueMaskSByte", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveCreateTrueMaskSingle.template", new Dictionary { ["TestName"] = "SveCreateTrueMaskSingle_float", ["Isa"] = "Sve", ["Method"] = "CreateTrueMaskSingle", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + 
("SveCreateTrueMaskUInt16.template", new Dictionary { ["TestName"] = "SveCreateTrueMaskUInt16_ushort", ["Isa"] = "Sve", ["Method"] = "CreateTrueMaskUInt16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveCreateTrueMaskUInt32.template", new Dictionary { ["TestName"] = "SveCreateTrueMaskUInt32_uint", ["Isa"] = "Sve", ["Method"] = "CreateTrueMaskUInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveCreateTrueMaskUInt64.template", new Dictionary { ["TestName"] = "SveCreateTrueMaskUInt64_ulong", ["Isa"] = "Sve", ["Method"] = "CreateTrueMaskUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask16Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask16Bit_ushort", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask16Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask16Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask16Bit_ushort", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask16Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask16Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask16Bit_ushort", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask16Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask16Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask16Bit_ushort", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask16Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask32Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask32Bit_uint", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask32Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask32Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask32Bit_uint", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask32Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask32Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask32Bit_uint", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask32Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask32Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask32Bit_uint", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask32Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask64Bit.template",new Dictionary {["TestName"] = 
"SveCreateWhileLessThanMask64Bit_ulong", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask64Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask64Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask64Bit_ulong", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask64Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask64Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask64Bit_ulong", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask64Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask64Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask64Bit_ulong", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask64Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask8Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask8Bit_byte", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask8Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask8Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask8Bit_byte", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask8Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask8Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask8Bit_byte", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask8Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanMask8Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanMask8Bit_byte", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanMask8Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask16Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask16Bit_ushort", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask16Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask16Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask16Bit_ushort", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask16Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask16Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask16Bit_ushort", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask16Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + 
("SveCreateWhileLessThanOrEqualMask16Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask16Bit_ushort", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask16Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask32Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask32Bit_uint", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask32Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask32Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask32Bit_uint", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask32Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask32Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask32Bit_uint", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask32Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask32Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask32Bit_uint", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask32Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask64Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask64Bit_ulong", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask64Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask64Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask64Bit_ulong", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask64Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask64Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask64Bit_ulong", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask64Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask64Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask64Bit_ulong", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask64Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask8Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask8Bit_byte", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask8Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask8Bit.template",new Dictionary {["TestName"] = 
"SveCreateWhileLessThanOrEqualMask8Bit_byte", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask8Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask8Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask8Bit_byte", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask8Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCreateWhileLessThanOrEqualMask8Bit.template",new Dictionary {["TestName"] = "SveCreateWhileLessThanOrEqualMask8Bit_byte", ["Isa"] = "Sve", ["Method"] = "CreateWhileLessThanOrEqualMask8Bit", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastScalar.template",new Dictionary { ["TestName"] = "SveExtractAfterLastScalar_float", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastScalar", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastScalar.template",new Dictionary { ["TestName"] = "SveExtractAfterLastScalar_double", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastScalar", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastScalar.template",new Dictionary { ["TestName"] = "SveExtractAfterLastScalar_sbyte", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastScalar", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastScalar.template",new Dictionary { ["TestName"] = "SveExtractAfterLastScalar_short", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastScalar", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastScalar.template",new Dictionary { ["TestName"] = "SveExtractAfterLastScalar_int", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastScalar", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastScalar.template",new Dictionary { ["TestName"] = "SveExtractAfterLastScalar_long", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastScalar", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastScalar.template",new Dictionary { ["TestName"] = "SveExtractAfterLastScalar_byte", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastScalar", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastScalar.template",new Dictionary { ["TestName"] = "SveExtractAfterLastScalar_ushort", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastScalar", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastScalar.template",new Dictionary { ["TestName"] = "SveExtractAfterLastScalar_uint", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastScalar", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastScalar.template",new Dictionary { ["TestName"] = 
"SveExtractAfterLastScalar_ulong", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastScalar", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastVector.template",new Dictionary { ["TestName"] = "SveExtractAfterLastVector_float", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastVector.template",new Dictionary { ["TestName"] = "SveExtractAfterLastVector_double", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastVector.template",new Dictionary { ["TestName"] = "SveExtractAfterLastVector_sbyte", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastVector.template",new Dictionary { ["TestName"] = "SveExtractAfterLastVector_short", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastVector.template",new Dictionary { ["TestName"] = "SveExtractAfterLastVector_int", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastVector.template",new Dictionary { ["TestName"] = "SveExtractAfterLastVector_long", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastVector.template",new Dictionary { ["TestName"] = "SveExtractAfterLastVector_byte", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastVector.template",new Dictionary { ["TestName"] = "SveExtractAfterLastVector_ushort", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastVector.template",new Dictionary { ["TestName"] = "SveExtractAfterLastVector_uint", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveExtractAfterLastVector.template",new Dictionary { ["TestName"] = "SveExtractAfterLastVector_ulong", ["Isa"] = "Sve", ["Method"] = "ExtractAfterLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveExtractLastScalar.template", new Dictionary { ["TestName"] = "SveExtractLastScalar_float", ["Isa"] = "Sve", ["Method"] = "ExtractLastScalar", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", 
["LargestVectorSize"] = "8",}), + ("SveExtractLastScalar.template", new Dictionary { ["TestName"] = "SveExtractLastScalar_double", ["Isa"] = "Sve", ["Method"] = "ExtractLastScalar", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveExtractLastScalar.template", new Dictionary { ["TestName"] = "SveExtractLastScalar_sbyte", ["Isa"] = "Sve", ["Method"] = "ExtractLastScalar", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveExtractLastScalar.template", new Dictionary { ["TestName"] = "SveExtractLastScalar_short", ["Isa"] = "Sve", ["Method"] = "ExtractLastScalar", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveExtractLastScalar.template", new Dictionary { ["TestName"] = "SveExtractLastScalar_int", ["Isa"] = "Sve", ["Method"] = "ExtractLastScalar", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveExtractLastScalar.template", new Dictionary { ["TestName"] = "SveExtractLastScalar_long", ["Isa"] = "Sve", ["Method"] = "ExtractLastScalar", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveExtractLastScalar.template", new Dictionary { ["TestName"] = "SveExtractLastScalar_byte", ["Isa"] = "Sve", ["Method"] = "ExtractLastScalar", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveExtractLastScalar.template", new Dictionary { ["TestName"] = "SveExtractLastScalar_ushort", ["Isa"] = "Sve", ["Method"] = "ExtractLastScalar", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveExtractLastScalar.template", new Dictionary { ["TestName"] = "SveExtractLastScalar_uint", ["Isa"] = "Sve", ["Method"] = "ExtractLastScalar", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveExtractLastScalar.template", new Dictionary { ["TestName"] = "SveExtractLastScalar_ulong", ["Isa"] = "Sve", ["Method"] = "ExtractLastScalar", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveExtractLastVector.template", new Dictionary { ["TestName"] = "SveExtractLastVector_float", ["Isa"] = "Sve", ["Method"] = "ExtractLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveExtractLastVector.template", new Dictionary { ["TestName"] = "SveExtractLastVector_double", ["Isa"] = "Sve", ["Method"] = "ExtractLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveExtractLastVector.template", new Dictionary { ["TestName"] = "SveExtractLastVector_sbyte", ["Isa"] = "Sve", ["Method"] = "ExtractLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveExtractLastVector.template", new Dictionary { ["TestName"] = "SveExtractLastVector_short", ["Isa"] = "Sve", ["Method"] = "ExtractLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", 
["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveExtractLastVector.template", new Dictionary { ["TestName"] = "SveExtractLastVector_int", ["Isa"] = "Sve", ["Method"] = "ExtractLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveExtractLastVector.template", new Dictionary { ["TestName"] = "SveExtractLastVector_long", ["Isa"] = "Sve", ["Method"] = "ExtractLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveExtractLastVector.template", new Dictionary { ["TestName"] = "SveExtractLastVector_byte", ["Isa"] = "Sve", ["Method"] = "ExtractLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveExtractLastVector.template", new Dictionary { ["TestName"] = "SveExtractLastVector_ushort", ["Isa"] = "Sve", ["Method"] = "ExtractLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveExtractLastVector.template", new Dictionary { ["TestName"] = "SveExtractLastVector_uint", ["Isa"] = "Sve", ["Method"] = "ExtractLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveExtractLastVector.template", new Dictionary { ["TestName"] = "SveExtractLastVector_ulong", ["Isa"] = "Sve", ["Method"] = "ExtractLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveExtractVector.template", new Dictionary { ["TestName"] = "SveExtractVector_float", ["Isa"] = "Sve", ["Method"] = "ExtractVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveExtractVector.template", new Dictionary { ["TestName"] = "SveExtractVector_double", ["Isa"] = "Sve", ["Method"] = "ExtractVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveExtractVector.template", new Dictionary { ["TestName"] = "SveExtractVector_sbyte", ["Isa"] = "Sve", ["Method"] = "ExtractVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveExtractVector.template", new Dictionary { ["TestName"] = "SveExtractVector_short", ["Isa"] = "Sve", ["Method"] = "ExtractVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveExtractVector.template", new Dictionary { ["TestName"] = "SveExtractVector_int", ["Isa"] = "Sve", ["Method"] = "ExtractVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", 
["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveExtractVector.template", new Dictionary { ["TestName"] = "SveExtractVector_long", ["Isa"] = "Sve", ["Method"] = "ExtractVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveExtractVector.template", new Dictionary { ["TestName"] = "SveExtractVector_byte", ["Isa"] = "Sve", ["Method"] = "ExtractVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveExtractVector.template", new Dictionary { ["TestName"] = "SveExtractVector_ushort", ["Isa"] = "Sve", ["Method"] = "ExtractVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveExtractVector.template", new Dictionary { ["TestName"] = "SveExtractVector_uint", ["Isa"] = "Sve", ["Method"] = "ExtractVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveExtractVector.template", new Dictionary { ["TestName"] = "SveExtractVector_ulong", ["Isa"] = "Sve", ["Method"] = "ExtractVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveTestAnyTrue.template", new Dictionary { ["TestName"] = "SveTestAnyTrue_sbyte", ["Isa"] = "Sve", ["Method"] = "TestAnyTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveTestAnyTrue.template", new Dictionary { ["TestName"] = "SveTestAnyTrue_short", ["Isa"] = "Sve", ["Method"] = "TestAnyTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveTestAnyTrue.template", new Dictionary { ["TestName"] = "SveTestAnyTrue_int", ["Isa"] = "Sve", ["Method"] = "TestAnyTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveTestAnyTrue.template", new Dictionary { ["TestName"] = "SveTestAnyTrue_long", ["Isa"] = "Sve", ["Method"] = "TestAnyTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveTestAnyTrue.template", new Dictionary { ["TestName"] = "SveTestAnyTrue_byte", ["Isa"] = "Sve", ["Method"] = "TestAnyTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveTestAnyTrue.template", new Dictionary { ["TestName"] = "SveTestAnyTrue_ushort", ["Isa"] = 
"Sve", ["Method"] = "TestAnyTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveTestAnyTrue.template", new Dictionary { ["TestName"] = "SveTestAnyTrue_uint", ["Isa"] = "Sve", ["Method"] = "TestAnyTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveTestAnyTrue.template", new Dictionary { ["TestName"] = "SveTestAnyTrue_ulong", ["Isa"] = "Sve", ["Method"] = "TestAnyTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveTestFirstTrue.template", new Dictionary { ["TestName"] = "SveTestFirstTrue_sbyte", ["Isa"] = "Sve", ["Method"] = "TestFirstTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveTestFirstTrue.template", new Dictionary { ["TestName"] = "SveTestFirstTrue_short", ["Isa"] = "Sve", ["Method"] = "TestFirstTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveTestFirstTrue.template", new Dictionary { ["TestName"] = "SveTestFirstTrue_int", ["Isa"] = "Sve", ["Method"] = "TestFirstTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveTestFirstTrue.template", new Dictionary { ["TestName"] = "SveTestFirstTrue_long", ["Isa"] = "Sve", ["Method"] = "TestFirstTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveTestFirstTrue.template", new Dictionary { ["TestName"] = "SveTestFirstTrue_byte", ["Isa"] = "Sve", ["Method"] = "TestFirstTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveTestFirstTrue.template", new Dictionary { ["TestName"] = "SveTestFirstTrue_ushort", ["Isa"] = "Sve", ["Method"] = "TestFirstTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveTestFirstTrue.template", new Dictionary { ["TestName"] = "SveTestFirstTrue_uint", ["Isa"] = "Sve", ["Method"] = "TestFirstTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveTestFirstTrue.template", new Dictionary { ["TestName"] = "SveTestFirstTrue_ulong", ["Isa"] = "Sve", ["Method"] = "TestFirstTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveTestLastTrue.template", new Dictionary { ["TestName"] = "SveTestLastTrue_sbyte", ["Isa"] = "Sve", ["Method"] = "TestLastTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = 
"SByte", ["LargestVectorSize"] = "8",}), + ("SveTestLastTrue.template", new Dictionary { ["TestName"] = "SveTestLastTrue_short", ["Isa"] = "Sve", ["Method"] = "TestLastTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveTestLastTrue.template", new Dictionary { ["TestName"] = "SveTestLastTrue_int", ["Isa"] = "Sve", ["Method"] = "TestLastTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveTestLastTrue.template", new Dictionary { ["TestName"] = "SveTestLastTrue_long", ["Isa"] = "Sve", ["Method"] = "TestLastTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveTestLastTrue.template", new Dictionary { ["TestName"] = "SveTestLastTrue_byte", ["Isa"] = "Sve", ["Method"] = "TestLastTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveTestLastTrue.template", new Dictionary { ["TestName"] = "SveTestLastTrue_ushort", ["Isa"] = "Sve", ["Method"] = "TestLastTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveTestLastTrue.template", new Dictionary { ["TestName"] = "SveTestLastTrue_uint", ["Isa"] = "Sve", ["Method"] = "TestLastTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveTestLastTrue.template", new Dictionary { ["TestName"] = "SveTestLastTrue_ulong", ["Isa"] = "Sve", ["Method"] = "TestLastTrue", ["RetBaseType"] = "bool", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + + +// Sve loads + ("SveCompute16BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute16BitAddresses_uint_int", ["Isa"] = "Sve", ["Method"] = "Compute16BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCompute16BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute16BitAddresses_uint", ["Isa"] = "Sve", ["Method"] = "Compute16BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCompute16BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute16BitAddresses_ulong_long", ["Isa"] = "Sve", ["Method"] = "Compute16BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompute16BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute16BitAddresses_ulong", ["Isa"] = "Sve", ["Method"] = "Compute16BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = 
"Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompute32BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute32BitAddresses_uint_int", ["Isa"] = "Sve", ["Method"] = "Compute32BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCompute32BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute32BitAddresses_uint", ["Isa"] = "Sve", ["Method"] = "Compute32BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCompute32BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute32BitAddresses_ulong_long", ["Isa"] = "Sve", ["Method"] = "Compute32BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompute32BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute32BitAddresses_ulong", ["Isa"] = "Sve", ["Method"] = "Compute32BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompute64BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute64BitAddresses_uint_int", ["Isa"] = "Sve", ["Method"] = "Compute64BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCompute64BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute64BitAddresses_uint", ["Isa"] = "Sve", ["Method"] = "Compute64BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCompute64BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute64BitAddresses_ulong_long", ["Isa"] = "Sve", ["Method"] = "Compute64BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompute64BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute64BitAddresses_ulong", ["Isa"] = "Sve", ["Method"] = "Compute64BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveCompute8BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute8BitAddresses_uint_int", ["Isa"] = "Sve", ["Method"] = "Compute8BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveCompute8BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute8BitAddresses_uint", ["Isa"] = 
"Sve", ["Method"] = "Compute8BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveCompute8BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute8BitAddresses_ulong_long", ["Isa"] = "Sve", ["Method"] = "Compute8BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveCompute8BitAddresses.template", new Dictionary { ["TestName"] = "SveCompute8BitAddresses_ulong", ["Isa"] = "Sve", ["Method"] = "Compute8BitAddresses", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveLoadVector.template", new Dictionary { ["TestName"] = "SveLoadVector_float", ["Isa"] = "Sve", ["Method"] = "LoadVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveLoadVector.template", new Dictionary { ["TestName"] = "SveLoadVector_double", ["Isa"] = "Sve", ["Method"] = "LoadVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveLoadVector.template", new Dictionary { ["TestName"] = "SveLoadVector_sbyte", ["Isa"] = "Sve", ["Method"] = "LoadVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVector.template", new Dictionary { ["TestName"] = "SveLoadVector_short", ["Isa"] = "Sve", ["Method"] = "LoadVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVector.template", new Dictionary { ["TestName"] = "SveLoadVector_int", ["Isa"] = "Sve", ["Method"] = "LoadVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLoadVector.template", new Dictionary { ["TestName"] = "SveLoadVector_long", ["Isa"] = "Sve", ["Method"] = "LoadVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveLoadVector.template", new Dictionary { ["TestName"] = "SveLoadVector_byte", ["Isa"] = "Sve", ["Method"] = "LoadVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVector.template", new Dictionary { ["TestName"] = "SveLoadVector_ushort", ["Isa"] = "Sve", ["Method"] = "LoadVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVector.template", new Dictionary { ["TestName"] = "SveLoadVector_uint", ["Isa"] = "Sve", ["Method"] = "LoadVector", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveLoadVector.template", new Dictionary { ["TestName"] = "SveLoadVector_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveLoadVector128AndReplicateToVector.template",new Dictionary {["TestName"] = "SveLoadVector128AndReplicateToVector_float", ["Isa"] = "Sve", ["Method"] = "LoadVector128AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveLoadVector128AndReplicateToVector.template",new Dictionary {["TestName"] = "SveLoadVector128AndReplicateToVector_double", ["Isa"] = "Sve", ["Method"] = "LoadVector128AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveLoadVector128AndReplicateToVector.template",new Dictionary {["TestName"] = "SveLoadVector128AndReplicateToVector_sbyte", ["Isa"] = "Sve", ["Method"] = "LoadVector128AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVector128AndReplicateToVector.template",new Dictionary {["TestName"] = "SveLoadVector128AndReplicateToVector_short", ["Isa"] = "Sve", ["Method"] = "LoadVector128AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVector128AndReplicateToVector.template",new Dictionary {["TestName"] = "SveLoadVector128AndReplicateToVector_int", ["Isa"] = "Sve", ["Method"] = "LoadVector128AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLoadVector128AndReplicateToVector.template",new Dictionary {["TestName"] = "SveLoadVector128AndReplicateToVector_long", ["Isa"] = "Sve", ["Method"] = "LoadVector128AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveLoadVector128AndReplicateToVector.template",new Dictionary {["TestName"] = "SveLoadVector128AndReplicateToVector_byte", ["Isa"] = "Sve", ["Method"] = "LoadVector128AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVector128AndReplicateToVector.template",new Dictionary {["TestName"] = "SveLoadVector128AndReplicateToVector_ushort", ["Isa"] = "Sve", ["Method"] = "LoadVector128AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVector128AndReplicateToVector.template",new Dictionary {["TestName"] = "SveLoadVector128AndReplicateToVector_uint", ["Isa"] = "Sve", ["Method"] = 
"LoadVector128AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveLoadVector128AndReplicateToVector.template",new Dictionary {["TestName"] = "SveLoadVector128AndReplicateToVector_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVector128AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteNonFaultingZeroExtendToInt16.template",new Dictionary {["TestName"] = "SveLoadVectorByteNonFaultingZeroExtendToInt16_short", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteNonFaultingZeroExtendToInt16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteNonFaultingZeroExtendToInt32.template",new Dictionary {["TestName"] = "SveLoadVectorByteNonFaultingZeroExtendToInt32_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteNonFaultingZeroExtendToInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteNonFaultingZeroExtendToInt64.template",new Dictionary {["TestName"] = "SveLoadVectorByteNonFaultingZeroExtendToInt64_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteNonFaultingZeroExtendToInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteNonFaultingZeroExtendToUInt16.template",new Dictionary {["TestName"] = "SveLoadVectorByteNonFaultingZeroExtendToUInt16_ushort", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteNonFaultingZeroExtendToUInt16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteNonFaultingZeroExtendToUInt32.template",new Dictionary {["TestName"] = "SveLoadVectorByteNonFaultingZeroExtendToUInt32_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteNonFaultingZeroExtendToUInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteNonFaultingZeroExtendToUInt64.template",new Dictionary {["TestName"] = "SveLoadVectorByteNonFaultingZeroExtendToUInt64_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteNonFaultingZeroExtendToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteZeroExtendToInt16.template",new Dictionary {["TestName"] = "SveLoadVectorByteZeroExtendToInt16_short", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteZeroExtendToInt16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteZeroExtendToInt32.template",new Dictionary {["TestName"] = "SveLoadVectorByteZeroExtendToInt32_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteZeroExtendToInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteZeroExtendToInt64.template",new Dictionary {["TestName"] = "SveLoadVectorByteZeroExtendToInt64_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteZeroExtendToInt64", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteZeroExtendToUInt16.template",new Dictionary {["TestName"] = "SveLoadVectorByteZeroExtendToUInt16_ushort", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteZeroExtendToUInt16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteZeroExtendToUInt32.template",new Dictionary {["TestName"] = "SveLoadVectorByteZeroExtendToUInt32_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteZeroExtendToUInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteZeroExtendToUInt64.template",new Dictionary {["TestName"] = "SveLoadVectorByteZeroExtendToUInt64_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteZeroExtendToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt16NonFaultingSignExtendToInt32.template",new Dictionary {["TestName"] = "SveLoadVectorInt16NonFaultingSignExtendToInt32_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt16NonFaultingSignExtendToInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt16NonFaultingSignExtendToInt64.template",new Dictionary {["TestName"] = "SveLoadVectorInt16NonFaultingSignExtendToInt64_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt16NonFaultingSignExtendToInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt16NonFaultingSignExtendToUInt32.template",new Dictionary {["TestName"] = "SveLoadVectorInt16NonFaultingSignExtendToUInt32_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt16NonFaultingSignExtendToUInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt16NonFaultingSignExtendToUInt64.template",new Dictionary {["TestName"] = "SveLoadVectorInt16NonFaultingSignExtendToUInt64_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt16NonFaultingSignExtendToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt16SignExtendToInt32.template",new Dictionary {["TestName"] = "SveLoadVectorInt16SignExtendToInt32_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt16SignExtendToInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt16SignExtendToInt64.template",new Dictionary {["TestName"] = "SveLoadVectorInt16SignExtendToInt64_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt16SignExtendToInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt16SignExtendToUInt32.template",new Dictionary {["TestName"] = "SveLoadVectorInt16SignExtendToUInt32_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt16SignExtendToUInt32", ["RetVectorType"] = 
"Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt16SignExtendToUInt64.template",new Dictionary {["TestName"] = "SveLoadVectorInt16SignExtendToUInt64_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt16SignExtendToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt32NonFaultingSignExtendToInt64.template",new Dictionary {["TestName"] = "SveLoadVectorInt32NonFaultingSignExtendToInt64_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt32NonFaultingSignExtendToInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt32NonFaultingSignExtendToUInt64.template",new Dictionary {["TestName"] = "SveLoadVectorInt32NonFaultingSignExtendToUInt64_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt32NonFaultingSignExtendToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt32SignExtendToInt64.template",new Dictionary {["TestName"] = "SveLoadVectorInt32SignExtendToInt64_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt32SignExtendToInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt32SignExtendToUInt64.template",new Dictionary {["TestName"] = "SveLoadVectorInt32SignExtendToUInt64_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt32SignExtendToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonFaulting.template", new Dictionary { ["TestName"] = "SveLoadVectorNonFaulting_float", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonFaulting.template", new Dictionary { ["TestName"] = "SveLoadVectorNonFaulting_double", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonFaulting.template", new Dictionary { ["TestName"] = "SveLoadVectorNonFaulting_sbyte", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonFaulting.template", new Dictionary { ["TestName"] = "SveLoadVectorNonFaulting_short", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonFaulting.template", new Dictionary { ["TestName"] = "SveLoadVectorNonFaulting_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonFaulting.template", new Dictionary { ["TestName"] = "SveLoadVectorNonFaulting_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", 
["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonFaulting.template", new Dictionary { ["TestName"] = "SveLoadVectorNonFaulting_byte", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonFaulting.template", new Dictionary { ["TestName"] = "SveLoadVectorNonFaulting_ushort", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonFaulting.template", new Dictionary { ["TestName"] = "SveLoadVectorNonFaulting_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonFaulting.template", new Dictionary { ["TestName"] = "SveLoadVectorNonFaulting_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonTemporal.template", new Dictionary { ["TestName"] = "SveLoadVectorNonTemporal_float", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonTemporal.template", new Dictionary { ["TestName"] = "SveLoadVectorNonTemporal_double", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonTemporal.template", new Dictionary { ["TestName"] = "SveLoadVectorNonTemporal_sbyte", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonTemporal.template", new Dictionary { ["TestName"] = "SveLoadVectorNonTemporal_short", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonTemporal.template", new Dictionary { ["TestName"] = "SveLoadVectorNonTemporal_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonTemporal.template", new Dictionary { ["TestName"] = "SveLoadVectorNonTemporal_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonTemporal.template", new Dictionary { ["TestName"] = "SveLoadVectorNonTemporal_byte", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonTemporal.template", new 
Dictionary { ["TestName"] = "SveLoadVectorNonTemporal_ushort", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonTemporal.template", new Dictionary { ["TestName"] = "SveLoadVectorNonTemporal_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorNonTemporal.template", new Dictionary { ["TestName"] = "SveLoadVectorNonTemporal_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteNonFaultingSignExtendToInt16.template",new Dictionary {["TestName"] = "SveLoadVectorSByteNonFaultingSignExtendToInt16_short", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteNonFaultingSignExtendToInt16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteNonFaultingSignExtendToInt32.template",new Dictionary {["TestName"] = "SveLoadVectorSByteNonFaultingSignExtendToInt32_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteNonFaultingSignExtendToInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteNonFaultingSignExtendToInt64.template",new Dictionary {["TestName"] = "SveLoadVectorSByteNonFaultingSignExtendToInt64_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteNonFaultingSignExtendToInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteNonFaultingSignExtendToUInt16.template",new Dictionary {["TestName"] = "SveLoadVectorSByteNonFaultingSignExtendToUInt16_ushort", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteNonFaultingSignExtendToUInt16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteNonFaultingSignExtendToUInt32.template",new Dictionary {["TestName"] = "SveLoadVectorSByteNonFaultingSignExtendToUInt32_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteNonFaultingSignExtendToUInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteNonFaultingSignExtendToUInt64.template",new Dictionary {["TestName"] = "SveLoadVectorSByteNonFaultingSignExtendToUInt64_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteNonFaultingSignExtendToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteSignExtendToInt16.template",new Dictionary {["TestName"] = "SveLoadVectorSByteSignExtendToInt16_short", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteSignExtendToInt16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteSignExtendToInt32.template",new Dictionary {["TestName"] = "SveLoadVectorSByteSignExtendToInt32_int", ["Isa"] = "Sve", ["Method"] = 
"LoadVectorSByteSignExtendToInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteSignExtendToInt64.template",new Dictionary {["TestName"] = "SveLoadVectorSByteSignExtendToInt64_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteSignExtendToInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteSignExtendToUInt16.template",new Dictionary {["TestName"] = "SveLoadVectorSByteSignExtendToUInt16_ushort", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteSignExtendToUInt16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteSignExtendToUInt32.template",new Dictionary {["TestName"] = "SveLoadVectorSByteSignExtendToUInt32_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteSignExtendToUInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteSignExtendToUInt64.template",new Dictionary {["TestName"] = "SveLoadVectorSByteSignExtendToUInt64_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteSignExtendToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt16NonFaultingZeroExtendToInt32.template",new Dictionary {["TestName"] = "SveLoadVectorUInt16NonFaultingZeroExtendToInt32_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt16NonFaultingZeroExtendToInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt16NonFaultingZeroExtendToInt64.template",new Dictionary {["TestName"] = "SveLoadVectorUInt16NonFaultingZeroExtendToInt64_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt16NonFaultingZeroExtendToInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt16NonFaultingZeroExtendToUInt32.template",new Dictionary {["TestName"] = "SveLoadVectorUInt16NonFaultingZeroExtendToUInt32_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt16NonFaultingZeroExtendToUInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt16NonFaultingZeroExtendToUInt64.template",new Dictionary {["TestName"] = "SveLoadVectorUInt16NonFaultingZeroExtendToUInt64_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt16NonFaultingZeroExtendToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt16ZeroExtendToInt32.template",new Dictionary {["TestName"] = "SveLoadVectorUInt16ZeroExtendToInt32_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt16ZeroExtendToInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt16ZeroExtendToInt64.template",new Dictionary {["TestName"] = 
"SveLoadVectorUInt16ZeroExtendToInt64_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt16ZeroExtendToInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt16ZeroExtendToUInt32.template",new Dictionary {["TestName"] = "SveLoadVectorUInt16ZeroExtendToUInt32_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt16ZeroExtendToUInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt16ZeroExtendToUInt64.template",new Dictionary {["TestName"] = "SveLoadVectorUInt16ZeroExtendToUInt64_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt16ZeroExtendToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt32NonFaultingZeroExtendToInt64.template",new Dictionary {["TestName"] = "SveLoadVectorUInt32NonFaultingZeroExtendToInt64_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt32NonFaultingZeroExtendToInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt32NonFaultingZeroExtendToUInt64.template",new Dictionary {["TestName"] = "SveLoadVectorUInt32NonFaultingZeroExtendToUInt64_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt32NonFaultingZeroExtendToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt32ZeroExtendToInt64.template",new Dictionary {["TestName"] = "SveLoadVectorUInt32ZeroExtendToInt64_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt32ZeroExtendToInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt32ZeroExtendToUInt64.template",new Dictionary {["TestName"] = "SveLoadVectorUInt32ZeroExtendToUInt64_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt32ZeroExtendToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx2.template", new Dictionary { ["TestName"] = "SveLoadVectorx2_float", ["Isa"] = "Sve", ["Method"] = "LoadVectorx2", ["RetVectorType"] = "Vector_Vector",["RetBaseType"] = "Single_Single",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx2.template", new Dictionary { ["TestName"] = "SveLoadVectorx2_double", ["Isa"] = "Sve", ["Method"] = "LoadVectorx2", ["RetVectorType"] = "Vector_Vector",["RetBaseType"] = "Double_Double",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx2.template", new Dictionary { ["TestName"] = "SveLoadVectorx2_sbyte", ["Isa"] = "Sve", ["Method"] = "LoadVectorx2", ["RetVectorType"] = "Vector_Vector",["RetBaseType"] = "SByte_SByte",["Op1VectorType"] = "Vector",["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx2.template", new Dictionary { ["TestName"] = "SveLoadVectorx2_short", ["Isa"] = "Sve", ["Method"] = 
"LoadVectorx2", ["RetVectorType"] = "Vector_Vector",["RetBaseType"] = "Int16_Int16",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx2.template", new Dictionary { ["TestName"] = "SveLoadVectorx2_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorx2", ["RetVectorType"] = "Vector_Vector",["RetBaseType"] = "Int32_Int32",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx2.template", new Dictionary { ["TestName"] = "SveLoadVectorx2_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorx2", ["RetVectorType"] = "Vector_Vector",["RetBaseType"] = "Int64_Int64",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx2.template", new Dictionary { ["TestName"] = "SveLoadVectorx2_byte", ["Isa"] = "Sve", ["Method"] = "LoadVectorx2", ["RetVectorType"] = "Vector_Vector",["RetBaseType"] = "Byte_Byte",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx2.template", new Dictionary { ["TestName"] = "SveLoadVectorx2_ushort", ["Isa"] = "Sve", ["Method"] = "LoadVectorx2", ["RetVectorType"] = "Vector_Vector",["RetBaseType"] = "UInt16_UInt16",["Op1VectorType"] = "Vector",["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx2.template", new Dictionary { ["TestName"] = "SveLoadVectorx2_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorx2", ["RetVectorType"] = "Vector_Vector",["RetBaseType"] = "UInt32_UInt32",["Op1VectorType"] = "Vector",["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx2.template", new Dictionary { ["TestName"] = "SveLoadVectorx2_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorx2", ["RetVectorType"] = "Vector_Vector",["RetBaseType"] = "UInt64_UInt64",["Op1VectorType"] = "Vector",["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx3.template", new Dictionary { ["TestName"] = "SveLoadVectorx3_float", ["Isa"] = "Sve", ["Method"] = "LoadVectorx3", ["RetVectorType"] = "Vector_Vector_Vector",["RetBaseType"] = "Single_Single_Single",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx3.template", new Dictionary { ["TestName"] = "SveLoadVectorx3_double", ["Isa"] = "Sve", ["Method"] = "LoadVectorx3", ["RetVectorType"] = "Vector_Vector_Vector",["RetBaseType"] = "Double_Double_Double",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx3.template", new Dictionary { ["TestName"] = "SveLoadVectorx3_sbyte", ["Isa"] = "Sve", ["Method"] = "LoadVectorx3", ["RetVectorType"] = "Vector_Vector_Vector",["RetBaseType"] = "SByte_SByte_SByte",["Op1VectorType"] = "Vector",["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx3.template", new Dictionary { ["TestName"] = "SveLoadVectorx3_short", ["Isa"] = "Sve", ["Method"] = "LoadVectorx3", ["RetVectorType"] = "Vector_Vector_Vector",["RetBaseType"] = "Int16_Int16_Int16",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx3.template", new Dictionary { ["TestName"] = "SveLoadVectorx3_int", ["Isa"] = "Sve", ["Method"] = 
"LoadVectorx3", ["RetVectorType"] = "Vector_Vector_Vector",["RetBaseType"] = "Int32_Int32_Int32",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx3.template", new Dictionary { ["TestName"] = "SveLoadVectorx3_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorx3", ["RetVectorType"] = "Vector_Vector_Vector",["RetBaseType"] = "Int64_Int64_Int64",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx3.template", new Dictionary { ["TestName"] = "SveLoadVectorx3_byte", ["Isa"] = "Sve", ["Method"] = "LoadVectorx3", ["RetVectorType"] = "Vector_Vector_Vector",["RetBaseType"] = "Byte_Byte_Byte",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx3.template", new Dictionary { ["TestName"] = "SveLoadVectorx3_ushort", ["Isa"] = "Sve", ["Method"] = "LoadVectorx3", ["RetVectorType"] = "Vector_Vector_Vector",["RetBaseType"] = "UInt16_UInt16_UInt16",["Op1VectorType"] = "Vector",["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx3.template", new Dictionary { ["TestName"] = "SveLoadVectorx3_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorx3", ["RetVectorType"] = "Vector_Vector_Vector",["RetBaseType"] = "UInt32_UInt32_UInt32",["Op1VectorType"] = "Vector",["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx3.template", new Dictionary { ["TestName"] = "SveLoadVectorx3_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorx3", ["RetVectorType"] = "Vector_Vector_Vector",["RetBaseType"] = "UInt64_UInt64_UInt64",["Op1VectorType"] = "Vector",["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx4.template", new Dictionary { ["TestName"] = "SveLoadVectorx4_float", ["Isa"] = "Sve", ["Method"] = "LoadVectorx4", ["RetVectorType"] = "Vector_Vector_Vector_Vector",["RetBaseType"] = "Single_Single_Single_Single",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx4.template", new Dictionary { ["TestName"] = "SveLoadVectorx4_double", ["Isa"] = "Sve", ["Method"] = "LoadVectorx4", ["RetVectorType"] = "Vector_Vector_Vector_Vector",["RetBaseType"] = "Double_Double_Double_Double",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx4.template", new Dictionary { ["TestName"] = "SveLoadVectorx4_sbyte", ["Isa"] = "Sve", ["Method"] = "LoadVectorx4", ["RetVectorType"] = "Vector_Vector_Vector_Vector",["RetBaseType"] = "SByte_SByte_SByte_SByte",["Op1VectorType"] = "Vector",["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx4.template", new Dictionary { ["TestName"] = "SveLoadVectorx4_short", ["Isa"] = "Sve", ["Method"] = "LoadVectorx4", ["RetVectorType"] = "Vector_Vector_Vector_Vector",["RetBaseType"] = "Int16_Int16_Int16_Int16",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx4.template", new Dictionary { ["TestName"] = "SveLoadVectorx4_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorx4", ["RetVectorType"] = "Vector_Vector_Vector_Vector",["RetBaseType"] = "Int32_Int32_Int32_Int32",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Int32", ["Op2BaseType"] = 
"Int32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx4.template", new Dictionary { ["TestName"] = "SveLoadVectorx4_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorx4", ["RetVectorType"] = "Vector_Vector_Vector_Vector",["RetBaseType"] = "Int64_Int64_Int64_Int64",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx4.template", new Dictionary { ["TestName"] = "SveLoadVectorx4_byte", ["Isa"] = "Sve", ["Method"] = "LoadVectorx4", ["RetVectorType"] = "Vector_Vector_Vector_Vector",["RetBaseType"] = "Byte_Byte_Byte_Byte",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx4.template", new Dictionary { ["TestName"] = "SveLoadVectorx4_ushort", ["Isa"] = "Sve", ["Method"] = "LoadVectorx4", ["RetVectorType"] = "Vector_Vector_Vector_Vector",["RetBaseType"] = "UInt16_UInt16_UInt16_UInt16",["Op1VectorType"] = "Vector",["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx4.template", new Dictionary { ["TestName"] = "SveLoadVectorx4_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorx4", ["RetVectorType"] = "Vector_Vector_Vector_Vector",["RetBaseType"] = "UInt32_UInt32_UInt32_UInt32",["Op1VectorType"] = "Vector",["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorx4.template", new Dictionary { ["TestName"] = "SveLoadVectorx4_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorx4", ["RetVectorType"] = "Vector_Vector_Vector_Vector",["RetBaseType"] = "UInt64_UInt64_UInt64_UInt64",["Op1VectorType"] = "Vector",["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SvePrefetchBytes.template", new Dictionary { ["TestName"] = "SvePrefetchBytes_byte", ["Isa"] = "Sve", ["Method"] = "PrefetchBytes", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "void", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SvePrefetchInt16.template", new Dictionary { ["TestName"] = "SvePrefetchInt16_ushort", ["Isa"] = "Sve", ["Method"] = "PrefetchInt16", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "void", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SvePrefetchInt32.template", new Dictionary { ["TestName"] = "SvePrefetchInt32_uint", ["Isa"] = "Sve", ["Method"] = "PrefetchInt32", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "void", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SvePrefetchInt64.template", new Dictionary { ["TestName"] = "SvePrefetchInt64_ulong", ["Isa"] = "Sve", ["Method"] = "PrefetchInt64", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "void", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + + +// Sve gatherloads + ("SveGatherPrefetch16Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch16Bit_ushort_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch16Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch16Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch16Bit_short_uint", ["Isa"] = "Sve", ["Method"] = 
"GatherPrefetch16Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch16Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch16Bit_ushort_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch16Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch16Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch16Bit_short_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch16Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch16Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch16Bit_ushort_int", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch16Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch16Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch16Bit_short_int", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch16Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch16Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch16Bit_ushort_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch16Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch16Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch16Bit_short_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch16Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch16Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch16Bit_ushort_long", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch16Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch16Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch16Bit_short_long", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch16Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch16Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch16Bit_ushort_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch16Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = 
"Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch16Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch16Bit_short_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch16Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch32Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch32Bit_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch32Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch32Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch32Bit_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch32Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch32Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch32Bit_uint_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch32Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch32Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch32Bit_int_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch32Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch32Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch32Bit_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch32Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch32Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch32Bit_int_int", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch32Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch32Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch32Bit_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch32Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch32Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch32Bit_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch32Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = 
"UInt32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch32Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch32Bit_uint_long", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch32Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch32Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch32Bit_int_long", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch32Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch32Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch32Bit_uint_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch32Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch32Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch32Bit_int_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch32Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch64Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch64Bit_ulong_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch64Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch64Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch64Bit_long_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch64Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch64Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch64Bit_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch64Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch64Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch64Bit_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch64Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch64Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch64Bit_ulong_int", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch64Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + 
("SveGatherPrefetch64Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch64Bit_long_int", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch64Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch64Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch64Bit_ulong_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch64Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch64Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch64Bit_long_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch64Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch64Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch64Bit_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch64Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch64Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch64Bit_long_long", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch64Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch64Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch64Bit_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch64Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch64Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch64Bit_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch64Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch8Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch8Bit_byte_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch8Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch8Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch8Bit_sbyte_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch8Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch8Bit.template", new Dictionary { ["TestName"] = 
"SveGatherPrefetch8Bit_byte_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch8Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch8Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch8Bit_sbyte_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch8Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch8Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch8Bit_byte_int", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch8Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch8Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch8Bit_sbyte_int", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch8Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch8Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch8Bit_byte_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch8Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch8Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch8Bit_sbyte_uint", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch8Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch8Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch8Bit_byte_long", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch8Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch8Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch8Bit_sbyte_long", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch8Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch8Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch8Bit_byte_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch8Bit", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherPrefetch8Bit.template", new Dictionary { ["TestName"] = "SveGatherPrefetch8Bit_sbyte_ulong", ["Isa"] = "Sve", ["Method"] = "GatherPrefetch8Bit", 
["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "void", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4BaseType"] = "SvePrefetchType",["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_float_uint", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_double_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_float_int", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_float_uint", ["Isa"] = "Sve", ["Method"] = "GatherVector", 
["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_double_long", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_double_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVector.template", new Dictionary { ["TestName"] = "SveGatherVector_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", 
["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtend_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtend_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtend_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtend_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", 
["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtend_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtend_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", 
["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtend_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtend_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtend_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtend_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", 
["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtend_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtend_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtend.template",new Dictionary 
{["TestName"] = "SveGatherVectorInt32SignExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtend_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtend_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtend_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtend_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] 
= "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtend_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtend_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtend_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtend_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = 
"UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtend_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtend_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtend_long_long", ["Isa"] = "Sve", ["Method"] = 
"GatherVectorSByteSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtend_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtend.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtend_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtend_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtend_long_long", ["Isa"] = "Sve", ["Method"] = 
"GatherVectorUInt16WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtend_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtend_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt16", 
["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtend_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtend_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtend_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtend_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtend.template",new Dictionary 
{["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtend_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtend_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtend_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + 
("SveGatherVectorUInt32ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtend_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtend_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtend_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtend_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtend_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtend_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", 
["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtend_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtend.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtend_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsets.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsets_float_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsets", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsets.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsets_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsets", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsets.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsets_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsets", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsets.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsets_float_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsets", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsets.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsets_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsets", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsets.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsets_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsets", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsets.template",new Dictionary {["TestName"] = 
"SveGatherVectorWithByteOffsets_double_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsets", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsets.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsets_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsets", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsets.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsets_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsets", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsets.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsets_double_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsets", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsets.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsets_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsets", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsets.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsets_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsets", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + + +// Sve fp + ("SveAddRotateComplex.template", new Dictionary { ["TestName"] = "SveAddRotateComplex_float", ["Isa"] = "Sve", ["Method"] = "AddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveAddRotateComplex.template", new Dictionary { ["TestName"] = "SveAddRotateComplex_double", ["Isa"] = "Sve", ["Method"] = "AddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveAddSequentialAcross.template", new Dictionary { ["TestName"] = "SveAddSequentialAcross_float", ["Isa"] = "Sve", ["Method"] = "AddSequentialAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + 
("SveAddSequentialAcross.template", new Dictionary { ["TestName"] = "SveAddSequentialAcross_double", ["Isa"] = "Sve", ["Method"] = "AddSequentialAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveConvertToDouble.template", new Dictionary { ["TestName"] = "SveConvertToDouble_double_float", ["Isa"] = "Sve", ["Method"] = "ConvertToDouble", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveConvertToDouble.template", new Dictionary { ["TestName"] = "SveConvertToDouble_double_int", ["Isa"] = "Sve", ["Method"] = "ConvertToDouble", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveConvertToDouble.template", new Dictionary { ["TestName"] = "SveConvertToDouble_double_long", ["Isa"] = "Sve", ["Method"] = "ConvertToDouble", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveConvertToDouble.template", new Dictionary { ["TestName"] = "SveConvertToDouble_double_uint", ["Isa"] = "Sve", ["Method"] = "ConvertToDouble", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveConvertToDouble.template", new Dictionary { ["TestName"] = "SveConvertToDouble_double_ulong", ["Isa"] = "Sve", ["Method"] = "ConvertToDouble", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveConvertToInt32.template", new Dictionary { ["TestName"] = "SveConvertToInt32_int_float", ["Isa"] = "Sve", ["Method"] = "ConvertToInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveConvertToInt32.template", new Dictionary { ["TestName"] = "SveConvertToInt32_int_double", ["Isa"] = "Sve", ["Method"] = "ConvertToInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveConvertToInt64.template", new Dictionary { ["TestName"] = "SveConvertToInt64_long_float", ["Isa"] = "Sve", ["Method"] = "ConvertToInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveConvertToInt64.template", new Dictionary { ["TestName"] = "SveConvertToInt64_long_double", ["Isa"] = "Sve", ["Method"] = "ConvertToInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveConvertToSingle.template", new Dictionary { ["TestName"] = "SveConvertToSingle_float_double", ["Isa"] = "Sve", ["Method"] = "ConvertToSingle", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveConvertToSingle.template", new Dictionary { ["TestName"] = "SveConvertToSingle_float_int", ["Isa"] = "Sve", ["Method"] = "ConvertToSingle", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", 
["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveConvertToSingle.template", new Dictionary { ["TestName"] = "SveConvertToSingle_float_long", ["Isa"] = "Sve", ["Method"] = "ConvertToSingle", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveConvertToSingle.template", new Dictionary { ["TestName"] = "SveConvertToSingle_float_uint", ["Isa"] = "Sve", ["Method"] = "ConvertToSingle", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveConvertToSingle.template", new Dictionary { ["TestName"] = "SveConvertToSingle_float_ulong", ["Isa"] = "Sve", ["Method"] = "ConvertToSingle", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveConvertToUInt32.template", new Dictionary { ["TestName"] = "SveConvertToUInt32_uint_float", ["Isa"] = "Sve", ["Method"] = "ConvertToUInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveConvertToUInt32.template", new Dictionary { ["TestName"] = "SveConvertToUInt32_uint_double", ["Isa"] = "Sve", ["Method"] = "ConvertToUInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveConvertToUInt64.template", new Dictionary { ["TestName"] = "SveConvertToUInt64_ulong_float", ["Isa"] = "Sve", ["Method"] = "ConvertToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveConvertToUInt64.template", new Dictionary { ["TestName"] = "SveConvertToUInt64_ulong_double", ["Isa"] = "Sve", ["Method"] = "ConvertToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveFloatingPointExponentialAccelerator.template",new Dictionary {["TestName"] = "SveFloatingPointExponentialAccelerator_float_uint", ["Isa"] = "Sve", ["Method"] = "FloatingPointExponentialAccelerator", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveFloatingPointExponentialAccelerator.template",new Dictionary {["TestName"] = "SveFloatingPointExponentialAccelerator_double_ulong", ["Isa"] = "Sve", ["Method"] = "FloatingPointExponentialAccelerator", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveMultiplyAddRotateComplex.template",new Dictionary {["TestName"] = "SveMultiplyAddRotateComplex_float", ["Isa"] = "Sve", ["Method"] = "MultiplyAddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("SveMultiplyAddRotateComplex.template",new Dictionary {["TestName"] = "SveMultiplyAddRotateComplex_double", ["Isa"] = "Sve", ["Method"] = "MultiplyAddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = 
"Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("SveMultiplyAddRotateComplexBySelectedScalar.template",new Dictionary {["TestName"] = "SveMultiplyAddRotateComplexBySelectedScalar_float", ["Isa"] = "Sve", ["Method"] = "MultiplyAddRotateComplexBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single",["Op4BaseType"] = "Byte",["Op5BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("SveReciprocalEstimate.template", new Dictionary { ["TestName"] = "SveReciprocalEstimate_float", ["Isa"] = "Sve", ["Method"] = "ReciprocalEstimate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveReciprocalEstimate.template", new Dictionary { ["TestName"] = "SveReciprocalEstimate_double", ["Isa"] = "Sve", ["Method"] = "ReciprocalEstimate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveReciprocalExponent.template", new Dictionary { ["TestName"] = "SveReciprocalExponent_float", ["Isa"] = "Sve", ["Method"] = "ReciprocalExponent", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveReciprocalExponent.template", new Dictionary { ["TestName"] = "SveReciprocalExponent_double", ["Isa"] = "Sve", ["Method"] = "ReciprocalExponent", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveReciprocalSqrtEstimate.template",new Dictionary { ["TestName"] = "SveReciprocalSqrtEstimate_float", ["Isa"] = "Sve", ["Method"] = "ReciprocalSqrtEstimate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveReciprocalSqrtEstimate.template",new Dictionary { ["TestName"] = "SveReciprocalSqrtEstimate_double", ["Isa"] = "Sve", ["Method"] = "ReciprocalSqrtEstimate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveReciprocalSqrtStep.template", new Dictionary { ["TestName"] = "SveReciprocalSqrtStep_float", ["Isa"] = "Sve", ["Method"] = "ReciprocalSqrtStep", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveReciprocalSqrtStep.template", new Dictionary { ["TestName"] = "SveReciprocalSqrtStep_double", ["Isa"] = "Sve", ["Method"] = "ReciprocalSqrtStep", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveReciprocalStep.template", new Dictionary { ["TestName"] = "SveReciprocalStep_float", ["Isa"] = "Sve", ["Method"] = "ReciprocalStep", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] 
= "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveReciprocalStep.template", new Dictionary { ["TestName"] = "SveReciprocalStep_double", ["Isa"] = "Sve", ["Method"] = "ReciprocalStep", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveRoundAwayFromZero.template", new Dictionary { ["TestName"] = "SveRoundAwayFromZero_float", ["Isa"] = "Sve", ["Method"] = "RoundAwayFromZero", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveRoundAwayFromZero.template", new Dictionary { ["TestName"] = "SveRoundAwayFromZero_double", ["Isa"] = "Sve", ["Method"] = "RoundAwayFromZero", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveRoundToNearest.template", new Dictionary { ["TestName"] = "SveRoundToNearest_float", ["Isa"] = "Sve", ["Method"] = "RoundToNearest", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveRoundToNearest.template", new Dictionary { ["TestName"] = "SveRoundToNearest_double", ["Isa"] = "Sve", ["Method"] = "RoundToNearest", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveRoundToNegativeInfinity.template",new Dictionary {["TestName"] = "SveRoundToNegativeInfinity_float", ["Isa"] = "Sve", ["Method"] = "RoundToNegativeInfinity", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveRoundToNegativeInfinity.template",new Dictionary {["TestName"] = "SveRoundToNegativeInfinity_double", ["Isa"] = "Sve", ["Method"] = "RoundToNegativeInfinity", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveRoundToPositiveInfinity.template",new Dictionary {["TestName"] = "SveRoundToPositiveInfinity_float", ["Isa"] = "Sve", ["Method"] = "RoundToPositiveInfinity", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveRoundToPositiveInfinity.template",new Dictionary {["TestName"] = "SveRoundToPositiveInfinity_double", ["Isa"] = "Sve", ["Method"] = "RoundToPositiveInfinity", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveRoundToZero.template", new Dictionary { ["TestName"] = "SveRoundToZero_float", ["Isa"] = "Sve", ["Method"] = "RoundToZero", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveRoundToZero.template", new Dictionary { ["TestName"] = "SveRoundToZero_double", ["Isa"] = "Sve", ["Method"] = "RoundToZero", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveScale.template", new Dictionary { ["TestName"] = "SveScale_float_int", ["Isa"] = "Sve", ["Method"] = "Scale", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveScale.template", new Dictionary { ["TestName"] = "SveScale_double_long", ["Isa"] = "Sve", ["Method"] = "Scale", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveSqrt.template", new Dictionary { ["TestName"] = "SveSqrt_float", ["Isa"] = "Sve", ["Method"] = "Sqrt", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveSqrt.template", new Dictionary { ["TestName"] = "SveSqrt_double", ["Isa"] = "Sve", ["Method"] = "Sqrt", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveTrigonometricMultiplyAddCoefficient.template",new Dictionary {["TestName"] = "SveTrigonometricMultiplyAddCoefficient_float", ["Isa"] = "Sve", ["Method"] = "TrigonometricMultiplyAddCoefficient", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveTrigonometricMultiplyAddCoefficient.template",new Dictionary {["TestName"] = "SveTrigonometricMultiplyAddCoefficient_double", ["Isa"] = "Sve", ["Method"] = "TrigonometricMultiplyAddCoefficient", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveTrigonometricSelectCoefficient.template",new Dictionary {["TestName"] = "SveTrigonometricSelectCoefficient_float_uint", ["Isa"] = "Sve", ["Method"] = "TrigonometricSelectCoefficient", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveTrigonometricSelectCoefficient.template",new Dictionary {["TestName"] = "SveTrigonometricSelectCoefficient_double_ulong", ["Isa"] = "Sve", ["Method"] = "TrigonometricSelectCoefficient", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveTrigonometricStartingValue.template",new Dictionary {["TestName"] = "SveTrigonometricStartingValue_float_uint", ["Isa"] = "Sve", ["Method"] = "TrigonometricStartingValue", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveTrigonometricStartingValue.template",new Dictionary {["TestName"] = "SveTrigonometricStartingValue_double_ulong", ["Isa"] = "Sve", ["Method"] = "TrigonometricStartingValue", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + + +// Sve firstfaulting + ("SveGatherVectorByteZeroExtendFirstFaulting.template",new Dictionary 
{["TestName"] = "SveGatherVectorByteZeroExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtendFirstFaulting_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtendFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtendFirstFaulting_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtendFirstFaulting_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtendFirstFaulting_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtendFirstFaulting_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtendFirstFaulting_long_long", 
["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtendFirstFaulting_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtendFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorByteZeroExtendFirstFaulting_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_float_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_double_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + 
("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_float_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_float_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_double_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = 
"Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_double_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorFirstFaulting_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtendFirstFaulting_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtendFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtendFirstFaulting_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtendFirstFaulting_int_int", 
["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtendFirstFaulting_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtendFirstFaulting_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtendFirstFaulting_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtendFirstFaulting_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtendFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16SignExtendFirstFaulting_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting.template",new 
Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = 
"SveGatherVectorInt16WithByteOffsetsSignExtendFirstFaulting_ulong_ulong",["Isa"] = "Sve", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtendFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtendFirstFaulting_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtendFirstFaulting_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtendFirstFaulting_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtendFirstFaulting_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtendFirstFaulting_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtendFirstFaulting.template",new Dictionary {["TestName"] = 
"SveGatherVectorInt32SignExtendFirstFaulting_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtendFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtendFirstFaulting_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32SignExtendFirstFaulting_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", 
["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting_ulong_ulong",["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorInt32WithByteOffsetsSignExtendFirstFaulting_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtendFirstFaulting_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = 
"SveGatherVectorSByteSignExtendFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtendFirstFaulting_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtendFirstFaulting_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtendFirstFaulting_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtendFirstFaulting_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtendFirstFaulting_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtendFirstFaulting_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtendFirstFaulting.template",new Dictionary 
{["TestName"] = "SveGatherVectorSByteSignExtendFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorSByteSignExtendFirstFaulting_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting_uint_uint",["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting_long_long",["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting_ulong_long",["Isa"] = "Sve", ["Method"] = 
"GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting_long_ulong",["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting_ulong_ulong",["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtendFirstFaulting_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtendFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtendFirstFaulting_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtendFirstFaulting_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = 
"SveGatherVectorUInt16ZeroExtendFirstFaulting_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtendFirstFaulting_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtendFirstFaulting_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtendFirstFaulting_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtendFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt16ZeroExtendFirstFaulting_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting_long_long",["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", 
["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting_ulong_long",["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting_long_ulong",["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting_ulong_ulong",["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting_uint_uint",["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = 
"8",}), + ("SveGatherVectorUInt32ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtendFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtendFirstFaulting_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtendFirstFaulting_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtendFirstFaulting_long_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtendFirstFaulting_int_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtendFirstFaulting_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtendFirstFaulting_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + 
("SveGatherVectorUInt32ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtendFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtendFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtendFirstFaulting_ulong_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorUInt32ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorUInt32ZeroExtendFirstFaulting_uint_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorUInt32ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsetFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsetFirstFaulting_float_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsetFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsetFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsetFirstFaulting_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsetFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsetFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsetFirstFaulting_uint_int", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsetFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsetFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsetFirstFaulting_float_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsetFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = 
"Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsetFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsetFirstFaulting_int_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsetFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsetFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsetFirstFaulting_uint", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsetFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsetFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsetFirstFaulting_double_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsetFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsetFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsetFirstFaulting_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsetFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsetFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsetFirstFaulting_ulong_long", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsetFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsetFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsetFirstFaulting_double_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsetFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsetFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsetFirstFaulting_long_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsetFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGatherVectorWithByteOffsetFirstFaulting.template",new Dictionary {["TestName"] = "SveGatherVectorWithByteOffsetFirstFaulting_ulong", ["Isa"] = "Sve", ["Method"] = "GatherVectorWithByteOffsetFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", 
["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveGetFfr.template", new Dictionary { ["TestName"] = "SveGetFfr_sbyte", ["Isa"] = "Sve", ["Method"] = "GetFfr", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveGetFfr.template", new Dictionary { ["TestName"] = "SveGetFfr_short", ["Isa"] = "Sve", ["Method"] = "GetFfr", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveGetFfr.template", new Dictionary { ["TestName"] = "SveGetFfr_int", ["Isa"] = "Sve", ["Method"] = "GetFfr", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGetFfr.template", new Dictionary { ["TestName"] = "SveGetFfr_long", ["Isa"] = "Sve", ["Method"] = "GetFfr", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGetFfr.template", new Dictionary { ["TestName"] = "SveGetFfr_byte", ["Isa"] = "Sve", ["Method"] = "GetFfr", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveGetFfr.template", new Dictionary { ["TestName"] = "SveGetFfr_ushort", ["Isa"] = "Sve", ["Method"] = "GetFfr", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveGetFfr.template", new Dictionary { ["TestName"] = "SveGetFfr_uint", ["Isa"] = "Sve", ["Method"] = "GetFfr", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGetFfr.template", new Dictionary { ["TestName"] = "SveGetFfr_ulong", ["Isa"] = "Sve", ["Method"] = "GetFfr", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorByteZeroExtendFirstFaulting_short", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorByteZeroExtendFirstFaulting_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorByteZeroExtendFirstFaulting_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorByteZeroExtendFirstFaulting_ushort", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorByteZeroExtendFirstFaulting_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] 
= "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorByteZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorByteZeroExtendFirstFaulting_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorByteZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorFirstFaulting_float", ["Isa"] = "Sve", ["Method"] = "LoadVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorFirstFaulting_double", ["Isa"] = "Sve", ["Method"] = "LoadVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorFirstFaulting_sbyte", ["Isa"] = "Sve", ["Method"] = "LoadVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorFirstFaulting_short", ["Isa"] = "Sve", ["Method"] = "LoadVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorFirstFaulting_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorFirstFaulting_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorFirstFaulting_byte", ["Isa"] = "Sve", ["Method"] = "LoadVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorFirstFaulting_ushort", ["Isa"] = "Sve", ["Method"] = "LoadVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorFirstFaulting_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + 
("SveLoadVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorFirstFaulting_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorInt16SignExtendFirstFaulting_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorInt16SignExtendFirstFaulting_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorInt16SignExtendFirstFaulting_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt16SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorInt16SignExtendFirstFaulting_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt16SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt32SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorInt32SignExtendFirstFaulting_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt32SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorInt32SignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorInt32SignExtendFirstFaulting_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorInt32SignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorSByteSignExtendFirstFaulting_short", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorSByteSignExtendFirstFaulting_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorSByteSignExtendFirstFaulting_long", ["Isa"] = 
"Sve", ["Method"] = "LoadVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorSByteSignExtendFirstFaulting_ushort", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorSByteSignExtendFirstFaulting_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorSByteSignExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorSByteSignExtendFirstFaulting_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorSByteSignExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorUInt16ZeroExtendFirstFaulting_int", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorUInt16ZeroExtendFirstFaulting_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorUInt16ZeroExtendFirstFaulting_uint", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt16ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorUInt16ZeroExtendFirstFaulting_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt16ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt32ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorUInt32ZeroExtendFirstFaulting_long", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt32ZeroExtendFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveLoadVectorUInt32ZeroExtendFirstFaulting.template",new Dictionary {["TestName"] = "SveLoadVectorUInt32ZeroExtendFirstFaulting_ulong", ["Isa"] = "Sve", ["Method"] = "LoadVectorUInt32ZeroExtendFirstFaulting", ["RetVectorType"] 
= "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSetFfr.template", new Dictionary { ["TestName"] = "SveSetFfr_sbyte", ["Isa"] = "Sve", ["Method"] = "SetFfr", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveSetFfr.template", new Dictionary { ["TestName"] = "SveSetFfr_short", ["Isa"] = "Sve", ["Method"] = "SetFfr", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveSetFfr.template", new Dictionary { ["TestName"] = "SveSetFfr_int", ["Isa"] = "Sve", ["Method"] = "SetFfr", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveSetFfr.template", new Dictionary { ["TestName"] = "SveSetFfr_long", ["Isa"] = "Sve", ["Method"] = "SetFfr", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveSetFfr.template", new Dictionary { ["TestName"] = "SveSetFfr_byte", ["Isa"] = "Sve", ["Method"] = "SetFfr", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveSetFfr.template", new Dictionary { ["TestName"] = "SveSetFfr_ushort", ["Isa"] = "Sve", ["Method"] = "SetFfr", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveSetFfr.template", new Dictionary { ["TestName"] = "SveSetFfr_uint", ["Isa"] = "Sve", ["Method"] = "SetFfr", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSetFfr.template", new Dictionary { ["TestName"] = "SveSetFfr_ulong", ["Isa"] = "Sve", ["Method"] = "SetFfr", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + + +// Sve counting + ("SveCount16BitElements.template", new Dictionary { ["TestName"] = "SveCount16BitElements_", ["Isa"] = "Sve", ["Method"] = "Count16BitElements", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveCount32BitElements.template", new Dictionary { ["TestName"] = "SveCount32BitElements_", ["Isa"] = "Sve", ["Method"] = "Count32BitElements", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveCount64BitElements.template", new Dictionary { ["TestName"] = "SveCount64BitElements_", ["Isa"] = "Sve", ["Method"] = "Count64BitElements", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveCount8BitElements.template", new Dictionary { ["TestName"] = "SveCount8BitElements_", ["Isa"] = "Sve", ["Method"] = "Count8BitElements", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveGetActiveElementCount.template", new Dictionary { ["TestName"] = "SveGetActiveElementCount_byte", ["Isa"] = "Sve", ["Method"] = "GetActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveGetActiveElementCount.template", new Dictionary { ["TestName"] = "SveGetActiveElementCount_sbyte", ["Isa"] = "Sve", ["Method"] = "GetActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", 
["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveGetActiveElementCount.template", new Dictionary { ["TestName"] = "SveGetActiveElementCount_short", ["Isa"] = "Sve", ["Method"] = "GetActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveGetActiveElementCount.template", new Dictionary { ["TestName"] = "SveGetActiveElementCount_int", ["Isa"] = "Sve", ["Method"] = "GetActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveGetActiveElementCount.template", new Dictionary { ["TestName"] = "SveGetActiveElementCount_long", ["Isa"] = "Sve", ["Method"] = "GetActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveGetActiveElementCount.template", new Dictionary { ["TestName"] = "SveGetActiveElementCount_float", ["Isa"] = "Sve", ["Method"] = "GetActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveGetActiveElementCount.template", new Dictionary { ["TestName"] = "SveGetActiveElementCount_double", ["Isa"] = "Sve", ["Method"] = "GetActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveGetActiveElementCount.template", new Dictionary { ["TestName"] = "SveGetActiveElementCount_ushort", ["Isa"] = "Sve", ["Method"] = "GetActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveGetActiveElementCount.template", new Dictionary { ["TestName"] = "SveGetActiveElementCount_uint", ["Isa"] = "Sve", ["Method"] = "GetActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveGetActiveElementCount.template", new Dictionary { ["TestName"] = "SveGetActiveElementCount_ulong", ["Isa"] = "Sve", ["Method"] = "GetActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveLeadingSignCount.template", new Dictionary { ["TestName"] = "SveLeadingSignCount_byte_sbyte", ["Isa"] = "Sve", ["Method"] = "LeadingSignCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLeadingSignCount.template", new Dictionary { ["TestName"] = "SveLeadingSignCount_ushort_short", ["Isa"] = "Sve", ["Method"] = "LeadingSignCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLeadingSignCount.template", new Dictionary { ["TestName"] = "SveLeadingSignCount_uint_int", ["Isa"] = "Sve", ["Method"] = "LeadingSignCount", 
["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLeadingSignCount.template", new Dictionary { ["TestName"] = "SveLeadingSignCount_ulong_long", ["Isa"] = "Sve", ["Method"] = "LeadingSignCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveLeadingZeroCount.template", new Dictionary { ["TestName"] = "SveLeadingZeroCount_byte_sbyte", ["Isa"] = "Sve", ["Method"] = "LeadingZeroCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveLeadingZeroCount.template", new Dictionary { ["TestName"] = "SveLeadingZeroCount_ushort_short", ["Isa"] = "Sve", ["Method"] = "LeadingZeroCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveLeadingZeroCount.template", new Dictionary { ["TestName"] = "SveLeadingZeroCount_uint_int", ["Isa"] = "Sve", ["Method"] = "LeadingZeroCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveLeadingZeroCount.template", new Dictionary { ["TestName"] = "SveLeadingZeroCount_ulong_long", ["Isa"] = "Sve", ["Method"] = "LeadingZeroCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveLeadingZeroCount.template", new Dictionary { ["TestName"] = "SveLeadingZeroCount_byte", ["Isa"] = "Sve", ["Method"] = "LeadingZeroCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveLeadingZeroCount.template", new Dictionary { ["TestName"] = "SveLeadingZeroCount_ushort", ["Isa"] = "Sve", ["Method"] = "LeadingZeroCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveLeadingZeroCount.template", new Dictionary { ["TestName"] = "SveLeadingZeroCount_uint", ["Isa"] = "Sve", ["Method"] = "LeadingZeroCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveLeadingZeroCount.template", new Dictionary { ["TestName"] = "SveLeadingZeroCount_ulong", ["Isa"] = "Sve", ["Method"] = "LeadingZeroCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SvePopCount.template", new Dictionary { ["TestName"] = "SvePopCount_uint_float", ["Isa"] = "Sve", ["Method"] = "PopCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SvePopCount.template", new Dictionary { ["TestName"] = "SvePopCount_ulong_double", ["Isa"] = "Sve", ["Method"] = "PopCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SvePopCount.template", new Dictionary { ["TestName"] = "SvePopCount_byte_sbyte", ["Isa"] = "Sve", ["Method"] = "PopCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SvePopCount.template", new Dictionary { ["TestName"] = "SvePopCount_ushort_short", ["Isa"] = "Sve", ["Method"] = "PopCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SvePopCount.template", new Dictionary { ["TestName"] = "SvePopCount_uint_int", ["Isa"] = "Sve", ["Method"] = "PopCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SvePopCount.template", new Dictionary { ["TestName"] = "SvePopCount_ulong_long", ["Isa"] = "Sve", ["Method"] = "PopCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SvePopCount.template", new Dictionary { ["TestName"] = "SvePopCount_byte", ["Isa"] = "Sve", ["Method"] = "PopCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SvePopCount.template", new Dictionary { ["TestName"] = "SvePopCount_ushort", ["Isa"] = "Sve", ["Method"] = "PopCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SvePopCount.template", new Dictionary { ["TestName"] = "SvePopCount_uint", ["Isa"] = "Sve", ["Method"] = "PopCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SvePopCount.template", new Dictionary { ["TestName"] = "SvePopCount_ulong", ["Isa"] = "Sve", ["Method"] = "PopCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy16BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy16BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy16BitElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy16BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy16BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy16BitElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy16BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy16BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy16BitElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy16BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy16BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy16BitElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy16BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy16BitElementCount_short", ["Isa"] = "Sve", 
["Method"] = "SaturatingDecrementBy16BitElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy16BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy16BitElementCount_ushort", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy16BitElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy32BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy32BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy32BitElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy32BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy32BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy32BitElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy32BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy32BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy32BitElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy32BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy32BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy32BitElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy32BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy32BitElementCount_int", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy32BitElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy32BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy32BitElementCount_uint", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy32BitElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy64BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy64BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy64BitElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy64BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy64BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy64BitElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2BaseType"] 
= "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy64BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy64BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy64BitElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy64BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy64BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy64BitElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy64BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy64BitElementCount_long", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy64BitElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy64BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy64BitElementCount_ulong", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy64BitElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy8BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy8BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy8BitElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy8BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy8BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy8BitElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy8BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy8BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy8BitElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementBy8BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementBy8BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementBy8BitElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_byte", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_ushort", ["Isa"] = "Sve", ["Method"] = 
"SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_uint", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_ulong", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_byte", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_ushort", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_uint", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_ulong", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_byte", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_ushort", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_uint", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_ulong", ["Isa"] = 
"Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_byte", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_ushort", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_uint", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_ulong", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_short", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_int", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_long", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_ushort", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_uint", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSaturatingDecrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingDecrementByActiveElementCount_ulong", ["Isa"] = "Sve", ["Method"] = "SaturatingDecrementByActiveElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy16BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy16BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy16BitElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy16BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy16BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy16BitElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy16BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy16BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy16BitElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy16BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy16BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy16BitElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy16BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy16BitElementCount_short", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy16BitElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy16BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy16BitElementCount_ushort", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy16BitElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy32BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy32BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy32BitElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy32BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy32BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy32BitElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + 
("SveSaturatingIncrementBy32BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy32BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy32BitElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy32BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy32BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy32BitElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy32BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy32BitElementCount_int", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy32BitElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy32BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy32BitElementCount_uint", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy32BitElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy64BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy64BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy64BitElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy64BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy64BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy64BitElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy64BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy64BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy64BitElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy64BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy64BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy64BitElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy64BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy64BitElementCount_long", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy64BitElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy64BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy64BitElementCount_ulong", ["Isa"] = "Sve", ["Method"] = 
"SaturatingIncrementBy64BitElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy8BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy8BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy8BitElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy8BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy8BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy8BitElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy8BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy8BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy8BitElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementBy8BitElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementBy8BitElementCount_", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementBy8BitElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "SveMaskPattern",["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_byte", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_ushort", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_uint", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_ulong", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_byte", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = 
"SveSaturatingIncrementByActiveElementCount_ushort", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_uint", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_ulong", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_byte", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_ushort", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_uint", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_ulong", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_byte", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_ushort", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_uint", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new 
Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_ulong", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_short", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_int", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_long", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_ushort", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_uint", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSaturatingIncrementByActiveElementCount.template",new Dictionary {["TestName"] = "SveSaturatingIncrementByActiveElementCount_ulong", ["Isa"] = "Sve", ["Method"] = "SaturatingIncrementByActiveElementCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + + +// Sve bitwise + ("SveAnd.template", new Dictionary { ["TestName"] = "SveAnd_sbyte", ["Isa"] = "Sve", ["Method"] = "And", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveAnd.template", new Dictionary { ["TestName"] = "SveAnd_short", ["Isa"] = "Sve", ["Method"] = "And", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveAnd.template", new Dictionary { ["TestName"] = "SveAnd_int", ["Isa"] = "Sve", ["Method"] = "And", 
["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveAnd.template", new Dictionary { ["TestName"] = "SveAnd_long", ["Isa"] = "Sve", ["Method"] = "And", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveAnd.template", new Dictionary { ["TestName"] = "SveAnd_byte", ["Isa"] = "Sve", ["Method"] = "And", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveAnd.template", new Dictionary { ["TestName"] = "SveAnd_ushort", ["Isa"] = "Sve", ["Method"] = "And", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveAnd.template", new Dictionary { ["TestName"] = "SveAnd_uint", ["Isa"] = "Sve", ["Method"] = "And", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveAnd.template", new Dictionary { ["TestName"] = "SveAnd_ulong", ["Isa"] = "Sve", ["Method"] = "And", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveAndAcross.template", new Dictionary { ["TestName"] = "SveAndAcross_sbyte", ["Isa"] = "Sve", ["Method"] = "AndAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveAndAcross.template", new Dictionary { ["TestName"] = "SveAndAcross_short", ["Isa"] = "Sve", ["Method"] = "AndAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveAndAcross.template", new Dictionary { ["TestName"] = "SveAndAcross_int", ["Isa"] = "Sve", ["Method"] = "AndAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveAndAcross.template", new Dictionary { ["TestName"] = "SveAndAcross_long", ["Isa"] = "Sve", ["Method"] = "AndAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveAndAcross.template", new Dictionary { ["TestName"] = "SveAndAcross_byte", ["Isa"] = "Sve", ["Method"] = "AndAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveAndAcross.template", new Dictionary { ["TestName"] = "SveAndAcross_ushort", ["Isa"] = "Sve", ["Method"] = "AndAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveAndAcross.template", new Dictionary { ["TestName"] = "SveAndAcross_uint", ["Isa"] = "Sve", ["Method"] = "AndAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = 
"UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveAndAcross.template", new Dictionary { ["TestName"] = "SveAndAcross_ulong", ["Isa"] = "Sve", ["Method"] = "AndAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveAndNot.template", new Dictionary { ["TestName"] = "SveAndNot_sbyte", ["Isa"] = "Sve", ["Method"] = "AndNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveAndNot.template", new Dictionary { ["TestName"] = "SveAndNot_short", ["Isa"] = "Sve", ["Method"] = "AndNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveAndNot.template", new Dictionary { ["TestName"] = "SveAndNot_int", ["Isa"] = "Sve", ["Method"] = "AndNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveAndNot.template", new Dictionary { ["TestName"] = "SveAndNot_long", ["Isa"] = "Sve", ["Method"] = "AndNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveAndNot.template", new Dictionary { ["TestName"] = "SveAndNot_byte", ["Isa"] = "Sve", ["Method"] = "AndNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveAndNot.template", new Dictionary { ["TestName"] = "SveAndNot_ushort", ["Isa"] = "Sve", ["Method"] = "AndNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveAndNot.template", new Dictionary { ["TestName"] = "SveAndNot_uint", ["Isa"] = "Sve", ["Method"] = "AndNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveAndNot.template", new Dictionary { ["TestName"] = "SveAndNot_ulong", ["Isa"] = "Sve", ["Method"] = "AndNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveBitwiseClear.template", new Dictionary { ["TestName"] = "SveBitwiseClear_sbyte", ["Isa"] = "Sve", ["Method"] = "BitwiseClear", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveBitwiseClear.template", new Dictionary { ["TestName"] = "SveBitwiseClear_short", ["Isa"] = "Sve", ["Method"] = "BitwiseClear", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", 
["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveBitwiseClear.template", new Dictionary { ["TestName"] = "SveBitwiseClear_int", ["Isa"] = "Sve", ["Method"] = "BitwiseClear", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveBitwiseClear.template", new Dictionary { ["TestName"] = "SveBitwiseClear_long", ["Isa"] = "Sve", ["Method"] = "BitwiseClear", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveBitwiseClear.template", new Dictionary { ["TestName"] = "SveBitwiseClear_byte", ["Isa"] = "Sve", ["Method"] = "BitwiseClear", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveBitwiseClear.template", new Dictionary { ["TestName"] = "SveBitwiseClear_ushort", ["Isa"] = "Sve", ["Method"] = "BitwiseClear", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveBitwiseClear.template", new Dictionary { ["TestName"] = "SveBitwiseClear_uint", ["Isa"] = "Sve", ["Method"] = "BitwiseClear", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveBitwiseClear.template", new Dictionary { ["TestName"] = "SveBitwiseClear_ulong", ["Isa"] = "Sve", ["Method"] = "BitwiseClear", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveBooleanNot.template", new Dictionary { ["TestName"] = "SveBooleanNot_sbyte", ["Isa"] = "Sve", ["Method"] = "BooleanNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveBooleanNot.template", new Dictionary { ["TestName"] = "SveBooleanNot_short", ["Isa"] = "Sve", ["Method"] = "BooleanNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveBooleanNot.template", new Dictionary { ["TestName"] = "SveBooleanNot_int", ["Isa"] = "Sve", ["Method"] = "BooleanNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveBooleanNot.template", new Dictionary { ["TestName"] = "SveBooleanNot_long", ["Isa"] = "Sve", ["Method"] = "BooleanNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveBooleanNot.template", new Dictionary { ["TestName"] = "SveBooleanNot_byte", ["Isa"] = "Sve", ["Method"] = "BooleanNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveBooleanNot.template", new Dictionary { ["TestName"] = "SveBooleanNot_ushort", ["Isa"] = "Sve", 
["Method"] = "BooleanNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveBooleanNot.template", new Dictionary { ["TestName"] = "SveBooleanNot_uint", ["Isa"] = "Sve", ["Method"] = "BooleanNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveBooleanNot.template", new Dictionary { ["TestName"] = "SveBooleanNot_ulong", ["Isa"] = "Sve", ["Method"] = "BooleanNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveInsertIntoShiftedVector.template",new Dictionary {["TestName"] = "SveInsertIntoShiftedVector_float", ["Isa"] = "Sve", ["Method"] = "InsertIntoShiftedVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveInsertIntoShiftedVector.template",new Dictionary {["TestName"] = "SveInsertIntoShiftedVector_double", ["Isa"] = "Sve", ["Method"] = "InsertIntoShiftedVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveInsertIntoShiftedVector.template",new Dictionary {["TestName"] = "SveInsertIntoShiftedVector_sbyte", ["Isa"] = "Sve", ["Method"] = "InsertIntoShiftedVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveInsertIntoShiftedVector.template",new Dictionary {["TestName"] = "SveInsertIntoShiftedVector_short", ["Isa"] = "Sve", ["Method"] = "InsertIntoShiftedVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveInsertIntoShiftedVector.template",new Dictionary {["TestName"] = "SveInsertIntoShiftedVector_int", ["Isa"] = "Sve", ["Method"] = "InsertIntoShiftedVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveInsertIntoShiftedVector.template",new Dictionary {["TestName"] = "SveInsertIntoShiftedVector_long", ["Isa"] = "Sve", ["Method"] = "InsertIntoShiftedVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveInsertIntoShiftedVector.template",new Dictionary {["TestName"] = "SveInsertIntoShiftedVector_byte", ["Isa"] = "Sve", ["Method"] = "InsertIntoShiftedVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveInsertIntoShiftedVector.template",new Dictionary {["TestName"] = "SveInsertIntoShiftedVector_ushort", ["Isa"] = "Sve", ["Method"] = "InsertIntoShiftedVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveInsertIntoShiftedVector.template",new Dictionary {["TestName"] = "SveInsertIntoShiftedVector_uint", ["Isa"] = 
"Sve", ["Method"] = "InsertIntoShiftedVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveInsertIntoShiftedVector.template",new Dictionary {["TestName"] = "SveInsertIntoShiftedVector_ulong", ["Isa"] = "Sve", ["Method"] = "InsertIntoShiftedVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveNot.template", new Dictionary { ["TestName"] = "SveNot_sbyte", ["Isa"] = "Sve", ["Method"] = "Not", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveNot.template", new Dictionary { ["TestName"] = "SveNot_short", ["Isa"] = "Sve", ["Method"] = "Not", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveNot.template", new Dictionary { ["TestName"] = "SveNot_int", ["Isa"] = "Sve", ["Method"] = "Not", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveNot.template", new Dictionary { ["TestName"] = "SveNot_long", ["Isa"] = "Sve", ["Method"] = "Not", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveNot.template", new Dictionary { ["TestName"] = "SveNot_byte", ["Isa"] = "Sve", ["Method"] = "Not", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveNot.template", new Dictionary { ["TestName"] = "SveNot_ushort", ["Isa"] = "Sve", ["Method"] = "Not", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveNot.template", new Dictionary { ["TestName"] = "SveNot_uint", ["Isa"] = "Sve", ["Method"] = "Not", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveNot.template", new Dictionary { ["TestName"] = "SveNot_ulong", ["Isa"] = "Sve", ["Method"] = "Not", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveOr.template", new Dictionary { ["TestName"] = "SveOr_sbyte", ["Isa"] = "Sve", ["Method"] = "Or", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveOr.template", new Dictionary { ["TestName"] = "SveOr_short", ["Isa"] = "Sve", ["Method"] = "Or", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveOr.template", new Dictionary { ["TestName"] = "SveOr_int", ["Isa"] = "Sve", ["Method"] = "Or", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveOr.template", new Dictionary { 
["TestName"] = "SveOr_long", ["Isa"] = "Sve", ["Method"] = "Or", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveOr.template", new Dictionary { ["TestName"] = "SveOr_byte", ["Isa"] = "Sve", ["Method"] = "Or", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveOr.template", new Dictionary { ["TestName"] = "SveOr_ushort", ["Isa"] = "Sve", ["Method"] = "Or", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveOr.template", new Dictionary { ["TestName"] = "SveOr_uint", ["Isa"] = "Sve", ["Method"] = "Or", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveOr.template", new Dictionary { ["TestName"] = "SveOr_ulong", ["Isa"] = "Sve", ["Method"] = "Or", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveOrAcross.template", new Dictionary { ["TestName"] = "SveOrAcross_sbyte", ["Isa"] = "Sve", ["Method"] = "OrAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveOrAcross.template", new Dictionary { ["TestName"] = "SveOrAcross_short", ["Isa"] = "Sve", ["Method"] = "OrAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveOrAcross.template", new Dictionary { ["TestName"] = "SveOrAcross_int", ["Isa"] = "Sve", ["Method"] = "OrAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveOrAcross.template", new Dictionary { ["TestName"] = "SveOrAcross_long", ["Isa"] = "Sve", ["Method"] = "OrAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveOrAcross.template", new Dictionary { ["TestName"] = "SveOrAcross_byte", ["Isa"] = "Sve", ["Method"] = "OrAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveOrAcross.template", new Dictionary { ["TestName"] = "SveOrAcross_ushort", ["Isa"] = "Sve", ["Method"] = "OrAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveOrAcross.template", new Dictionary { ["TestName"] = "SveOrAcross_uint", ["Isa"] = "Sve", ["Method"] = "OrAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveOrAcross.template", new Dictionary { ["TestName"] = "SveOrAcross_ulong", ["Isa"] = "Sve", ["Method"] = "OrAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveOrNot.template", new Dictionary { ["TestName"] = "SveOrNot_sbyte", ["Isa"] = "Sve", ["Method"] = "OrNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveOrNot.template", new Dictionary { ["TestName"] = "SveOrNot_short", ["Isa"] = "Sve", ["Method"] = "OrNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveOrNot.template", new Dictionary { ["TestName"] = "SveOrNot_int", ["Isa"] = "Sve", ["Method"] = "OrNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveOrNot.template", new Dictionary { ["TestName"] = "SveOrNot_long", ["Isa"] = "Sve", ["Method"] = "OrNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveOrNot.template", new Dictionary { ["TestName"] = "SveOrNot_byte", ["Isa"] = "Sve", ["Method"] = "OrNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveOrNot.template", new Dictionary { ["TestName"] = "SveOrNot_ushort", ["Isa"] = "Sve", ["Method"] = "OrNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveOrNot.template", new Dictionary { ["TestName"] = "SveOrNot_uint", ["Isa"] = "Sve", ["Method"] = "OrNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveOrNot.template", new Dictionary { ["TestName"] = "SveOrNot_ulong", ["Isa"] = "Sve", ["Method"] = "OrNot", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftLeftLogical.template", new Dictionary { ["TestName"] = "SveShiftLeftLogical_sbyte_byte", ["Isa"] = "Sve", ["Method"] = "ShiftLeftLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveShiftLeftLogical.template", new Dictionary { ["TestName"] = "SveShiftLeftLogical_short_ushort", ["Isa"] = "Sve", ["Method"] = "ShiftLeftLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveShiftLeftLogical.template", new Dictionary { ["TestName"] = "SveShiftLeftLogical_int_uint", ["Isa"] = "Sve", ["Method"] = "ShiftLeftLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] 
= "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveShiftLeftLogical.template", new Dictionary { ["TestName"] = "SveShiftLeftLogical_long_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftLeftLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftLeftLogical.template", new Dictionary { ["TestName"] = "SveShiftLeftLogical_byte", ["Isa"] = "Sve", ["Method"] = "ShiftLeftLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveShiftLeftLogical.template", new Dictionary { ["TestName"] = "SveShiftLeftLogical_ushort", ["Isa"] = "Sve", ["Method"] = "ShiftLeftLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveShiftLeftLogical.template", new Dictionary { ["TestName"] = "SveShiftLeftLogical_uint", ["Isa"] = "Sve", ["Method"] = "ShiftLeftLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveShiftLeftLogical.template", new Dictionary { ["TestName"] = "SveShiftLeftLogical_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftLeftLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftLeftLogical.template", new Dictionary { ["TestName"] = "SveShiftLeftLogical_sbyte_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftLeftLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftLeftLogical.template", new Dictionary { ["TestName"] = "SveShiftLeftLogical_short_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftLeftLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftLeftLogical.template", new Dictionary { ["TestName"] = "SveShiftLeftLogical_int_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftLeftLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftLeftLogical.template", new Dictionary { ["TestName"] = "SveShiftLeftLogical_byte_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftLeftLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftLeftLogical.template", new Dictionary { ["TestName"] = "SveShiftLeftLogical_ushort_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftLeftLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", 
["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftLeftLogical.template", new Dictionary { ["TestName"] = "SveShiftLeftLogical_uint_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftLeftLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftRightArithmetic.template", new Dictionary { ["TestName"] = "SveShiftRightArithmetic_sbyte_byte", ["Isa"] = "Sve", ["Method"] = "ShiftRightArithmetic", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveShiftRightArithmetic.template", new Dictionary { ["TestName"] = "SveShiftRightArithmetic_short_ushort", ["Isa"] = "Sve", ["Method"] = "ShiftRightArithmetic", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveShiftRightArithmetic.template", new Dictionary { ["TestName"] = "SveShiftRightArithmetic_int_uint", ["Isa"] = "Sve", ["Method"] = "ShiftRightArithmetic", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveShiftRightArithmetic.template", new Dictionary { ["TestName"] = "SveShiftRightArithmetic_long_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftRightArithmetic", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftRightArithmetic.template", new Dictionary { ["TestName"] = "SveShiftRightArithmetic_sbyte_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftRightArithmetic", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftRightArithmetic.template", new Dictionary { ["TestName"] = "SveShiftRightArithmetic_short_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftRightArithmetic", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftRightArithmetic.template", new Dictionary { ["TestName"] = "SveShiftRightArithmetic_int_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftRightArithmetic", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftRightArithmeticForDivide.template",new Dictionary {["TestName"] = "SveShiftRightArithmeticForDivide_sbyte", ["Isa"] = "Sve", ["Method"] = "ShiftRightArithmeticForDivide", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveShiftRightArithmeticForDivide.template",new Dictionary {["TestName"] = "SveShiftRightArithmeticForDivide_short", ["Isa"] = "Sve", ["Method"] = 
"ShiftRightArithmeticForDivide", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveShiftRightArithmeticForDivide.template",new Dictionary {["TestName"] = "SveShiftRightArithmeticForDivide_int", ["Isa"] = "Sve", ["Method"] = "ShiftRightArithmeticForDivide", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveShiftRightArithmeticForDivide.template",new Dictionary {["TestName"] = "SveShiftRightArithmeticForDivide_long", ["Isa"] = "Sve", ["Method"] = "ShiftRightArithmeticForDivide", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveShiftRightLogical.template", new Dictionary { ["TestName"] = "SveShiftRightLogical_byte", ["Isa"] = "Sve", ["Method"] = "ShiftRightLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveShiftRightLogical.template", new Dictionary { ["TestName"] = "SveShiftRightLogical_ushort", ["Isa"] = "Sve", ["Method"] = "ShiftRightLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveShiftRightLogical.template", new Dictionary { ["TestName"] = "SveShiftRightLogical_uint", ["Isa"] = "Sve", ["Method"] = "ShiftRightLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveShiftRightLogical.template", new Dictionary { ["TestName"] = "SveShiftRightLogical_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftRightLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftRightLogical.template", new Dictionary { ["TestName"] = "SveShiftRightLogical_byte_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftRightLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftRightLogical.template", new Dictionary { ["TestName"] = "SveShiftRightLogical_ushort_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftRightLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveShiftRightLogical.template", new Dictionary { ["TestName"] = "SveShiftRightLogical_uint_ulong", ["Isa"] = "Sve", ["Method"] = "ShiftRightLogical", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveXor.template", new Dictionary { ["TestName"] = "SveXor_sbyte", ["Isa"] = "Sve", ["Method"] = "Xor", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveXor.template", new Dictionary { ["TestName"] = "SveXor_short", ["Isa"] = "Sve", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveXor.template", new Dictionary { ["TestName"] = "SveXor_int", ["Isa"] = "Sve", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveXor.template", new Dictionary { ["TestName"] = "SveXor_long", ["Isa"] = "Sve", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveXor.template", new Dictionary { ["TestName"] = "SveXor_byte", ["Isa"] = "Sve", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveXor.template", new Dictionary { ["TestName"] = "SveXor_ushort", ["Isa"] = "Sve", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveXor.template", new Dictionary { ["TestName"] = "SveXor_uint", ["Isa"] = "Sve", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveXor.template", new Dictionary { ["TestName"] = "SveXor_ulong", ["Isa"] = "Sve", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveXorAcross.template", new Dictionary { ["TestName"] = "SveXorAcross_sbyte", ["Isa"] = "Sve", ["Method"] = "XorAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveXorAcross.template", new Dictionary { ["TestName"] = "SveXorAcross_short", ["Isa"] = "Sve", ["Method"] = "XorAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveXorAcross.template", new Dictionary { ["TestName"] = "SveXorAcross_int", ["Isa"] = "Sve", ["Method"] = "XorAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveXorAcross.template", new Dictionary { ["TestName"] = "SveXorAcross_long", ["Isa"] = "Sve", ["Method"] = "XorAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveXorAcross.template", new Dictionary { ["TestName"] = "SveXorAcross_byte", ["Isa"] = "Sve", ["Method"] = "XorAcross", 
["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveXorAcross.template", new Dictionary { ["TestName"] = "SveXorAcross_ushort", ["Isa"] = "Sve", ["Method"] = "XorAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveXorAcross.template", new Dictionary { ["TestName"] = "SveXorAcross_uint", ["Isa"] = "Sve", ["Method"] = "XorAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveXorAcross.template", new Dictionary { ["TestName"] = "SveXorAcross_ulong", ["Isa"] = "Sve", ["Method"] = "XorAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + + +// Sve bitmanipulate + ("SveDuplicateSelectedScalarToVector.template",new Dictionary {["TestName"] = "SveDuplicateSelectedScalarToVector_float", ["Isa"] = "Sve", ["Method"] = "DuplicateSelectedScalarToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveDuplicateSelectedScalarToVector.template",new Dictionary {["TestName"] = "SveDuplicateSelectedScalarToVector_double", ["Isa"] = "Sve", ["Method"] = "DuplicateSelectedScalarToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveDuplicateSelectedScalarToVector.template",new Dictionary {["TestName"] = "SveDuplicateSelectedScalarToVector_sbyte", ["Isa"] = "Sve", ["Method"] = "DuplicateSelectedScalarToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveDuplicateSelectedScalarToVector.template",new Dictionary {["TestName"] = "SveDuplicateSelectedScalarToVector_short", ["Isa"] = "Sve", ["Method"] = "DuplicateSelectedScalarToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveDuplicateSelectedScalarToVector.template",new Dictionary {["TestName"] = "SveDuplicateSelectedScalarToVector_int", ["Isa"] = "Sve", ["Method"] = "DuplicateSelectedScalarToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveDuplicateSelectedScalarToVector.template",new Dictionary {["TestName"] = "SveDuplicateSelectedScalarToVector_long", ["Isa"] = "Sve", ["Method"] = "DuplicateSelectedScalarToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveDuplicateSelectedScalarToVector.template",new Dictionary {["TestName"] = "SveDuplicateSelectedScalarToVector_byte", ["Isa"] = "Sve", ["Method"] = "DuplicateSelectedScalarToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + 
("SveDuplicateSelectedScalarToVector.template",new Dictionary {["TestName"] = "SveDuplicateSelectedScalarToVector_ushort", ["Isa"] = "Sve", ["Method"] = "DuplicateSelectedScalarToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveDuplicateSelectedScalarToVector.template",new Dictionary {["TestName"] = "SveDuplicateSelectedScalarToVector_uint", ["Isa"] = "Sve", ["Method"] = "DuplicateSelectedScalarToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveDuplicateSelectedScalarToVector.template",new Dictionary {["TestName"] = "SveDuplicateSelectedScalarToVector_ulong", ["Isa"] = "Sve", ["Method"] = "DuplicateSelectedScalarToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveReverseBits.template", new Dictionary { ["TestName"] = "SveReverseBits_sbyte", ["Isa"] = "Sve", ["Method"] = "ReverseBits", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveReverseBits.template", new Dictionary { ["TestName"] = "SveReverseBits_short", ["Isa"] = "Sve", ["Method"] = "ReverseBits", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveReverseBits.template", new Dictionary { ["TestName"] = "SveReverseBits_int", ["Isa"] = "Sve", ["Method"] = "ReverseBits", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveReverseBits.template", new Dictionary { ["TestName"] = "SveReverseBits_long", ["Isa"] = "Sve", ["Method"] = "ReverseBits", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveReverseBits.template", new Dictionary { ["TestName"] = "SveReverseBits_byte", ["Isa"] = "Sve", ["Method"] = "ReverseBits", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveReverseBits.template", new Dictionary { ["TestName"] = "SveReverseBits_ushort", ["Isa"] = "Sve", ["Method"] = "ReverseBits", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveReverseBits.template", new Dictionary { ["TestName"] = "SveReverseBits_uint", ["Isa"] = "Sve", ["Method"] = "ReverseBits", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveReverseBits.template", new Dictionary { ["TestName"] = "SveReverseBits_ulong", ["Isa"] = "Sve", ["Method"] = "ReverseBits", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveReverseElement.template", new Dictionary { ["TestName"] = "SveReverseElement_float", ["Isa"] = "Sve", ["Method"] = "ReverseElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = 
"Single", ["LargestVectorSize"] = "8",}), + ("SveReverseElement.template", new Dictionary { ["TestName"] = "SveReverseElement_double", ["Isa"] = "Sve", ["Method"] = "ReverseElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveReverseElement.template", new Dictionary { ["TestName"] = "SveReverseElement_sbyte", ["Isa"] = "Sve", ["Method"] = "ReverseElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveReverseElement.template", new Dictionary { ["TestName"] = "SveReverseElement_short", ["Isa"] = "Sve", ["Method"] = "ReverseElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveReverseElement.template", new Dictionary { ["TestName"] = "SveReverseElement_int", ["Isa"] = "Sve", ["Method"] = "ReverseElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveReverseElement.template", new Dictionary { ["TestName"] = "SveReverseElement_long", ["Isa"] = "Sve", ["Method"] = "ReverseElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveReverseElement.template", new Dictionary { ["TestName"] = "SveReverseElement_byte", ["Isa"] = "Sve", ["Method"] = "ReverseElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveReverseElement.template", new Dictionary { ["TestName"] = "SveReverseElement_ushort", ["Isa"] = "Sve", ["Method"] = "ReverseElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveReverseElement.template", new Dictionary { ["TestName"] = "SveReverseElement_uint", ["Isa"] = "Sve", ["Method"] = "ReverseElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveReverseElement.template", new Dictionary { ["TestName"] = "SveReverseElement_ulong", ["Isa"] = "Sve", ["Method"] = "ReverseElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveReverseElement16.template", new Dictionary { ["TestName"] = "SveReverseElement16_int", ["Isa"] = "Sve", ["Method"] = "ReverseElement16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveReverseElement16.template", new Dictionary { ["TestName"] = "SveReverseElement16_long", ["Isa"] = "Sve", ["Method"] = "ReverseElement16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveReverseElement16.template", new Dictionary { ["TestName"] = "SveReverseElement16_uint", ["Isa"] = "Sve", ["Method"] = "ReverseElement16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveReverseElement16.template", new Dictionary { 
["TestName"] = "SveReverseElement16_ulong", ["Isa"] = "Sve", ["Method"] = "ReverseElement16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveReverseElement32.template", new Dictionary { ["TestName"] = "SveReverseElement32_long", ["Isa"] = "Sve", ["Method"] = "ReverseElement32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveReverseElement32.template", new Dictionary { ["TestName"] = "SveReverseElement32_ulong", ["Isa"] = "Sve", ["Method"] = "ReverseElement32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveReverseElement8.template", new Dictionary { ["TestName"] = "SveReverseElement8_short", ["Isa"] = "Sve", ["Method"] = "ReverseElement8", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveReverseElement8.template", new Dictionary { ["TestName"] = "SveReverseElement8_int", ["Isa"] = "Sve", ["Method"] = "ReverseElement8", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveReverseElement8.template", new Dictionary { ["TestName"] = "SveReverseElement8_long", ["Isa"] = "Sve", ["Method"] = "ReverseElement8", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveReverseElement8.template", new Dictionary { ["TestName"] = "SveReverseElement8_ushort", ["Isa"] = "Sve", ["Method"] = "ReverseElement8", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveReverseElement8.template", new Dictionary { ["TestName"] = "SveReverseElement8_uint", ["Isa"] = "Sve", ["Method"] = "ReverseElement8", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveReverseElement8.template", new Dictionary { ["TestName"] = "SveReverseElement8_ulong", ["Isa"] = "Sve", ["Method"] = "ReverseElement8", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveSplice.template", new Dictionary { ["TestName"] = "SveSplice_float", ["Isa"] = "Sve", ["Method"] = "Splice", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveSplice.template", new Dictionary { ["TestName"] = "SveSplice_double", ["Isa"] = "Sve", ["Method"] = "Splice", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveSplice.template", new Dictionary { ["TestName"] = "SveSplice_sbyte", ["Isa"] = "Sve", ["Method"] = "Splice", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", 
["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveSplice.template", new Dictionary { ["TestName"] = "SveSplice_short", ["Isa"] = "Sve", ["Method"] = "Splice", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveSplice.template", new Dictionary { ["TestName"] = "SveSplice_int", ["Isa"] = "Sve", ["Method"] = "Splice", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveSplice.template", new Dictionary { ["TestName"] = "SveSplice_long", ["Isa"] = "Sve", ["Method"] = "Splice", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveSplice.template", new Dictionary { ["TestName"] = "SveSplice_byte", ["Isa"] = "Sve", ["Method"] = "Splice", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveSplice.template", new Dictionary { ["TestName"] = "SveSplice_ushort", ["Isa"] = "Sve", ["Method"] = "Splice", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveSplice.template", new Dictionary { ["TestName"] = "SveSplice_uint", ["Isa"] = "Sve", ["Method"] = "Splice", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSplice.template", new Dictionary { ["TestName"] = "SveSplice_ulong", ["Isa"] = "Sve", ["Method"] = "Splice", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveTransposeEven.template", new Dictionary { ["TestName"] = "SveTransposeEven_float", ["Isa"] = "Sve", ["Method"] = "TransposeEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveTransposeEven.template", new Dictionary { ["TestName"] = "SveTransposeEven_double", ["Isa"] = "Sve", ["Method"] = "TransposeEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveTransposeEven.template", new Dictionary { ["TestName"] = 
"SveTransposeEven_sbyte", ["Isa"] = "Sve", ["Method"] = "TransposeEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveTransposeEven.template", new Dictionary { ["TestName"] = "SveTransposeEven_short", ["Isa"] = "Sve", ["Method"] = "TransposeEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveTransposeEven.template", new Dictionary { ["TestName"] = "SveTransposeEven_int", ["Isa"] = "Sve", ["Method"] = "TransposeEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveTransposeEven.template", new Dictionary { ["TestName"] = "SveTransposeEven_long", ["Isa"] = "Sve", ["Method"] = "TransposeEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveTransposeEven.template", new Dictionary { ["TestName"] = "SveTransposeEven_byte", ["Isa"] = "Sve", ["Method"] = "TransposeEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveTransposeEven.template", new Dictionary { ["TestName"] = "SveTransposeEven_ushort", ["Isa"] = "Sve", ["Method"] = "TransposeEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveTransposeEven.template", new Dictionary { ["TestName"] = "SveTransposeEven_uint", ["Isa"] = "Sve", ["Method"] = "TransposeEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveTransposeEven.template", new Dictionary { ["TestName"] = "SveTransposeEven_ulong", ["Isa"] = "Sve", ["Method"] = "TransposeEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveTransposeOdd.template", new Dictionary { ["TestName"] = "SveTransposeOdd_float", ["Isa"] = "Sve", ["Method"] = "TransposeOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveTransposeOdd.template", new Dictionary { ["TestName"] = "SveTransposeOdd_double", ["Isa"] = "Sve", ["Method"] = "TransposeOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveTransposeOdd.template", new Dictionary { ["TestName"] = "SveTransposeOdd_sbyte", ["Isa"] = "Sve", ["Method"] = "TransposeOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = 
"Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveTransposeOdd.template", new Dictionary { ["TestName"] = "SveTransposeOdd_short", ["Isa"] = "Sve", ["Method"] = "TransposeOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveTransposeOdd.template", new Dictionary { ["TestName"] = "SveTransposeOdd_int", ["Isa"] = "Sve", ["Method"] = "TransposeOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveTransposeOdd.template", new Dictionary { ["TestName"] = "SveTransposeOdd_long", ["Isa"] = "Sve", ["Method"] = "TransposeOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveTransposeOdd.template", new Dictionary { ["TestName"] = "SveTransposeOdd_byte", ["Isa"] = "Sve", ["Method"] = "TransposeOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveTransposeOdd.template", new Dictionary { ["TestName"] = "SveTransposeOdd_ushort", ["Isa"] = "Sve", ["Method"] = "TransposeOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveTransposeOdd.template", new Dictionary { ["TestName"] = "SveTransposeOdd_uint", ["Isa"] = "Sve", ["Method"] = "TransposeOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveTransposeOdd.template", new Dictionary { ["TestName"] = "SveTransposeOdd_ulong", ["Isa"] = "Sve", ["Method"] = "TransposeOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveUnzipEven.template", new Dictionary { ["TestName"] = "SveUnzipEven_float", ["Isa"] = "Sve", ["Method"] = "UnzipEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveUnzipEven.template", new Dictionary { ["TestName"] = "SveUnzipEven_double", ["Isa"] = "Sve", ["Method"] = "UnzipEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveUnzipEven.template", new Dictionary { ["TestName"] = "SveUnzipEven_sbyte", ["Isa"] = "Sve", ["Method"] = "UnzipEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveUnzipEven.template", new Dictionary { ["TestName"] = 
"SveUnzipEven_short", ["Isa"] = "Sve", ["Method"] = "UnzipEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveUnzipEven.template", new Dictionary { ["TestName"] = "SveUnzipEven_int", ["Isa"] = "Sve", ["Method"] = "UnzipEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveUnzipEven.template", new Dictionary { ["TestName"] = "SveUnzipEven_long", ["Isa"] = "Sve", ["Method"] = "UnzipEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveUnzipEven.template", new Dictionary { ["TestName"] = "SveUnzipEven_byte", ["Isa"] = "Sve", ["Method"] = "UnzipEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveUnzipEven.template", new Dictionary { ["TestName"] = "SveUnzipEven_ushort", ["Isa"] = "Sve", ["Method"] = "UnzipEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveUnzipEven.template", new Dictionary { ["TestName"] = "SveUnzipEven_uint", ["Isa"] = "Sve", ["Method"] = "UnzipEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveUnzipEven.template", new Dictionary { ["TestName"] = "SveUnzipEven_ulong", ["Isa"] = "Sve", ["Method"] = "UnzipEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveUnzipOdd.template", new Dictionary { ["TestName"] = "SveUnzipOdd_float", ["Isa"] = "Sve", ["Method"] = "UnzipOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveUnzipOdd.template", new Dictionary { ["TestName"] = "SveUnzipOdd_double", ["Isa"] = "Sve", ["Method"] = "UnzipOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveUnzipOdd.template", new Dictionary { ["TestName"] = "SveUnzipOdd_sbyte", ["Isa"] = "Sve", ["Method"] = "UnzipOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveUnzipOdd.template", new Dictionary { ["TestName"] = "SveUnzipOdd_short", ["Isa"] = "Sve", ["Method"] = "UnzipOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + 
("SveUnzipOdd.template", new Dictionary { ["TestName"] = "SveUnzipOdd_int", ["Isa"] = "Sve", ["Method"] = "UnzipOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveUnzipOdd.template", new Dictionary { ["TestName"] = "SveUnzipOdd_long", ["Isa"] = "Sve", ["Method"] = "UnzipOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveUnzipOdd.template", new Dictionary { ["TestName"] = "SveUnzipOdd_byte", ["Isa"] = "Sve", ["Method"] = "UnzipOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveUnzipOdd.template", new Dictionary { ["TestName"] = "SveUnzipOdd_ushort", ["Isa"] = "Sve", ["Method"] = "UnzipOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveUnzipOdd.template", new Dictionary { ["TestName"] = "SveUnzipOdd_uint", ["Isa"] = "Sve", ["Method"] = "UnzipOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveUnzipOdd.template", new Dictionary { ["TestName"] = "SveUnzipOdd_ulong", ["Isa"] = "Sve", ["Method"] = "UnzipOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveVectorTableLookup.template", new Dictionary { ["TestName"] = "SveVectorTableLookup_float_uint", ["Isa"] = "Sve", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveVectorTableLookup.template", new Dictionary { ["TestName"] = "SveVectorTableLookup_double_ulong", ["Isa"] = "Sve", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveVectorTableLookup.template", new Dictionary { ["TestName"] = "SveVectorTableLookup_sbyte_byte", ["Isa"] = "Sve", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveVectorTableLookup.template", new Dictionary { ["TestName"] = "SveVectorTableLookup_short_ushort", ["Isa"] = "Sve", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveVectorTableLookup.template", new Dictionary { ["TestName"] = "SveVectorTableLookup_int_uint", ["Isa"] = "Sve", ["Method"] = "VectorTableLookup", 
["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveVectorTableLookup.template", new Dictionary { ["TestName"] = "SveVectorTableLookup_long_ulong", ["Isa"] = "Sve", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveVectorTableLookup.template", new Dictionary { ["TestName"] = "SveVectorTableLookup_byte", ["Isa"] = "Sve", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveVectorTableLookup.template", new Dictionary { ["TestName"] = "SveVectorTableLookup_ushort", ["Isa"] = "Sve", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveVectorTableLookup.template", new Dictionary { ["TestName"] = "SveVectorTableLookup_uint", ["Isa"] = "Sve", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveVectorTableLookup.template", new Dictionary { ["TestName"] = "SveVectorTableLookup_ulong", ["Isa"] = "Sve", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveZipHigh.template", new Dictionary { ["TestName"] = "SveZipHigh_float", ["Isa"] = "Sve", ["Method"] = "ZipHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveZipHigh.template", new Dictionary { ["TestName"] = "SveZipHigh_double", ["Isa"] = "Sve", ["Method"] = "ZipHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveZipHigh.template", new Dictionary { ["TestName"] = "SveZipHigh_sbyte", ["Isa"] = "Sve", ["Method"] = "ZipHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveZipHigh.template", new Dictionary { ["TestName"] = "SveZipHigh_short", ["Isa"] = "Sve", ["Method"] = "ZipHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveZipHigh.template", new Dictionary { ["TestName"] = "SveZipHigh_int", ["Isa"] = "Sve", ["Method"] = "ZipHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = 
"Int32", ["LargestVectorSize"] = "8",}), + ("SveZipHigh.template", new Dictionary { ["TestName"] = "SveZipHigh_long", ["Isa"] = "Sve", ["Method"] = "ZipHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveZipHigh.template", new Dictionary { ["TestName"] = "SveZipHigh_byte", ["Isa"] = "Sve", ["Method"] = "ZipHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveZipHigh.template", new Dictionary { ["TestName"] = "SveZipHigh_ushort", ["Isa"] = "Sve", ["Method"] = "ZipHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveZipHigh.template", new Dictionary { ["TestName"] = "SveZipHigh_uint", ["Isa"] = "Sve", ["Method"] = "ZipHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveZipHigh.template", new Dictionary { ["TestName"] = "SveZipHigh_ulong", ["Isa"] = "Sve", ["Method"] = "ZipHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveZipLow.template", new Dictionary { ["TestName"] = "SveZipLow_float", ["Isa"] = "Sve", ["Method"] = "ZipLow", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveZipLow.template", new Dictionary { ["TestName"] = "SveZipLow_double", ["Isa"] = "Sve", ["Method"] = "ZipLow", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveZipLow.template", new Dictionary { ["TestName"] = "SveZipLow_sbyte", ["Isa"] = "Sve", ["Method"] = "ZipLow", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveZipLow.template", new Dictionary { ["TestName"] = "SveZipLow_short", ["Isa"] = "Sve", ["Method"] = "ZipLow", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveZipLow.template", new Dictionary { ["TestName"] = "SveZipLow_int", ["Isa"] = "Sve", ["Method"] = "ZipLow", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveZipLow.template", new Dictionary { ["TestName"] = "SveZipLow_long", ["Isa"] = "Sve", ["Method"] = "ZipLow", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", 
["LargestVectorSize"] = "8",}), + ("SveZipLow.template", new Dictionary { ["TestName"] = "SveZipLow_byte", ["Isa"] = "Sve", ["Method"] = "ZipLow", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveZipLow.template", new Dictionary { ["TestName"] = "SveZipLow_ushort", ["Isa"] = "Sve", ["Method"] = "ZipLow", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveZipLow.template", new Dictionary { ["TestName"] = "SveZipLow_uint", ["Isa"] = "Sve", ["Method"] = "ZipLow", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveZipLow.template", new Dictionary { ["TestName"] = "SveZipLow_ulong", ["Isa"] = "Sve", ["Method"] = "ZipLow", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + + +// Sve2 scatterstores + ("Sve2Scatter16BitNarrowing.template",new Dictionary { ["TestName"] = "Sve2Scatter16BitNarrowing_int_uint", ["Isa"] = "Sve2", ["Method"] = "Scatter16BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2Scatter16BitNarrowing.template",new Dictionary { ["TestName"] = "Sve2Scatter16BitNarrowing_uint", ["Isa"] = "Sve2", ["Method"] = "Scatter16BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2Scatter16BitNarrowing.template",new Dictionary { ["TestName"] = "Sve2Scatter16BitNarrowing_long_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter16BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2Scatter16BitNarrowing.template",new Dictionary { ["TestName"] = "Sve2Scatter16BitNarrowing_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter16BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2Scatter16BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter16BitWithByteOffsetsNarrowing_int_uint", ["Isa"] = "Sve2", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int32",["LargestVectorSize"] = "8",}), + ("Sve2Scatter16BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter16BitWithByteOffsetsNarrowing_uint", ["Isa"] = "Sve2", ["Method"] = 
"Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt32",["LargestVectorSize"] = "8",}), + ("Sve2Scatter16BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter16BitWithByteOffsetsNarrowing_long", ["Isa"] = "Sve2", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter16BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter16BitWithByteOffsetsNarrowing_ulong_long", ["Isa"] = "Sve2", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter16BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter16BitWithByteOffsetsNarrowing_long_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter16BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter16BitWithByteOffsetsNarrowing_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter16BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter16BitWithByteOffsetsNarrowing_long", ["Isa"] = "Sve2", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter16BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter16BitWithByteOffsetsNarrowing_ulong_long", ["Isa"] = "Sve2", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter16BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter16BitWithByteOffsetsNarrowing_long_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}), + 
("Sve2Scatter16BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter16BitWithByteOffsetsNarrowing_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter16BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter32BitNarrowing.template",new Dictionary { ["TestName"] = "Sve2Scatter32BitNarrowing_long_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter32BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2Scatter32BitNarrowing.template",new Dictionary { ["TestName"] = "Sve2Scatter32BitNarrowing_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter32BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2Scatter32BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter32BitWithByteOffsetsNarrowing_long", ["Isa"] = "Sve2", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter32BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter32BitWithByteOffsetsNarrowing_ulong_long", ["Isa"] = "Sve2", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter32BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter32BitWithByteOffsetsNarrowing_long_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter32BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter32BitWithByteOffsetsNarrowing_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter32BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter32BitWithByteOffsetsNarrowing_long", ["Isa"] = "Sve2", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = 
"Int64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter32BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter32BitWithByteOffsetsNarrowing_ulong_long", ["Isa"] = "Sve2", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter32BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter32BitWithByteOffsetsNarrowing_long_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter32BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter32BitWithByteOffsetsNarrowing_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter32BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter8BitNarrowing.template", new Dictionary { ["TestName"] = "Sve2Scatter8BitNarrowing_int_uint", ["Isa"] = "Sve2", ["Method"] = "Scatter8BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2Scatter8BitNarrowing.template", new Dictionary { ["TestName"] = "Sve2Scatter8BitNarrowing_uint", ["Isa"] = "Sve2", ["Method"] = "Scatter8BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2Scatter8BitNarrowing.template", new Dictionary { ["TestName"] = "Sve2Scatter8BitNarrowing_long_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter8BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2Scatter8BitNarrowing.template", new Dictionary { ["TestName"] = "Sve2Scatter8BitNarrowing_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter8BitNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2Scatter8BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter8BitWithByteOffsetsNarrowing_int_uint", ["Isa"] = "Sve2", ["Method"] = "Scatter8BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int32",["LargestVectorSize"] = "8",}), + ("Sve2Scatter8BitWithByteOffsetsNarrowing.template",new Dictionary 
{["TestName"] = "Sve2Scatter8BitWithByteOffsetsNarrowing_uint", ["Isa"] = "Sve2", ["Method"] = "Scatter8BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt32",["LargestVectorSize"] = "8",}), + ("Sve2Scatter8BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter8BitWithByteOffsetsNarrowing_long", ["Isa"] = "Sve2", ["Method"] = "Scatter8BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter8BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter8BitWithByteOffsetsNarrowing_ulong_long", ["Isa"] = "Sve2", ["Method"] = "Scatter8BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter8BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter8BitWithByteOffsetsNarrowing_long_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter8BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}), + ("Sve2Scatter8BitWithByteOffsetsNarrowing.template",new Dictionary {["TestName"] = "Sve2Scatter8BitWithByteOffsetsNarrowing_ulong", ["Isa"] = "Sve2", ["Method"] = "Scatter8BitWithByteOffsetsNarrowing", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_float_uint", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_uint", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_double_ulong", ["Isa"] = "Sve2", ["Method"] = 
"ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_ulong", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_float_uint", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Single",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int32",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_uint", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt32",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_double_long", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Double",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_long", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_ulong_long", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new 
Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_double_ulong", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Double",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_ulong", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_double_long", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Double",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_long", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_ulong_long", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_double_ulong", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Double",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "Int64",["LargestVectorSize"] = "8",}), + ("Sve2ScatterNonTemporal.template", new Dictionary { ["TestName"] = "Sve2ScatterNonTemporal_ulong", ["Isa"] = "Sve2", ["Method"] = "ScatterNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", 
["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4VectorType"] = "Vector",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + + +// Sve2 maths + ("Sve2AbsoluteDifferenceAdd.template",new Dictionary { ["TestName"] = "Sve2AbsoluteDifferenceAdd_sbyte", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAdd.template",new Dictionary { ["TestName"] = "Sve2AbsoluteDifferenceAdd_short", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAdd.template",new Dictionary { ["TestName"] = "Sve2AbsoluteDifferenceAdd_int", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAdd.template",new Dictionary { ["TestName"] = "Sve2AbsoluteDifferenceAdd_long", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAdd.template",new Dictionary { ["TestName"] = "Sve2AbsoluteDifferenceAdd_byte", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAdd.template",new Dictionary { ["TestName"] = "Sve2AbsoluteDifferenceAdd_ushort", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAdd.template",new Dictionary { ["TestName"] = "Sve2AbsoluteDifferenceAdd_uint", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAdd.template",new Dictionary { ["TestName"] = "Sve2AbsoluteDifferenceAdd_ulong", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + 
("Sve2AbsoluteDifferenceAddWideningLower.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceAddWideningLower_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAddWideningLower.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceAddWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAddWideningLower.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceAddWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAddWideningLower.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceAddWideningLower_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAddWideningLower.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceAddWideningLower_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAddWideningLower.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceAddWideningLower_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceAddWideningUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceAddWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = 
"Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceAddWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceAddWideningUpper_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceAddWideningUpper_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceAddWideningUpper_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceWideningLower.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceWideningLower_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceWideningLower.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceWideningLower.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceWideningLower.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceWideningLower_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] 
= "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceWideningLower.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceWideningLower_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceWideningLower.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceWideningLower_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceWideningUpper.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceWideningUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceWideningUpper.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceWideningUpper.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceWideningUpper.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceWideningUpper_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceWideningUpper.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceWideningUpper_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AbsoluteDifferenceWideningUpper.template",new Dictionary {["TestName"] = "Sve2AbsoluteDifferenceWideningUpper_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "AbsoluteDifferenceWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AddCarryWideningLower.template",new Dictionary { ["TestName"] = "Sve2AddCarryWideningLower_uint", ["Isa"] = "Sve2", ["Method"] = "AddCarryWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = 
"Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AddCarryWideningLower.template",new Dictionary { ["TestName"] = "Sve2AddCarryWideningLower_ulong", ["Isa"] = "Sve2", ["Method"] = "AddCarryWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2AddCarryWideningUpper.template",new Dictionary { ["TestName"] = "Sve2AddCarryWideningUpper_uint", ["Isa"] = "Sve2", ["Method"] = "AddCarryWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AddCarryWideningUpper.template",new Dictionary { ["TestName"] = "Sve2AddCarryWideningUpper_ulong", ["Isa"] = "Sve2", ["Method"] = "AddCarryWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2AddHighNarowingLower.template", new Dictionary { ["TestName"] = "Sve2AddHighNarowingLower_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "AddHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AddHighNarowingLower.template", new Dictionary { ["TestName"] = "Sve2AddHighNarowingLower_short_int", ["Isa"] = "Sve2", ["Method"] = "AddHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AddHighNarowingLower.template", new Dictionary { ["TestName"] = "Sve2AddHighNarowingLower_int_long", ["Isa"] = "Sve2", ["Method"] = "AddHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2AddHighNarowingLower.template", new Dictionary { ["TestName"] = "Sve2AddHighNarowingLower_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "AddHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AddHighNarowingLower.template", new Dictionary { ["TestName"] = "Sve2AddHighNarowingLower_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "AddHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AddHighNarowingLower.template", new Dictionary { ["TestName"] = "Sve2AddHighNarowingLower_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "AddHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2AddHighNarowingUpper.template", new Dictionary { ["TestName"] = "Sve2AddHighNarowingUpper_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "AddHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AddHighNarowingUpper.template", new Dictionary { ["TestName"] = "Sve2AddHighNarowingUpper_short_int", ["Isa"] = "Sve2", ["Method"] = "AddHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AddHighNarowingUpper.template", new Dictionary { ["TestName"] = "Sve2AddHighNarowingUpper_int_long", ["Isa"] = "Sve2", ["Method"] = "AddHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2AddHighNarowingUpper.template", new Dictionary { ["TestName"] = "Sve2AddHighNarowingUpper_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "AddHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AddHighNarowingUpper.template", new Dictionary { ["TestName"] = "Sve2AddHighNarowingUpper_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "AddHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AddHighNarowingUpper.template", new Dictionary { ["TestName"] = "Sve2AddHighNarowingUpper_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "AddHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwise.template", new Dictionary { ["TestName"] = "Sve2AddPairwise_float", ["Isa"] = "Sve2", ["Method"] = "AddPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwise.template", new Dictionary { ["TestName"] = "Sve2AddPairwise_double", ["Isa"] = "Sve2", ["Method"] = "AddPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwise.template", new Dictionary { ["TestName"] = "Sve2AddPairwise_sbyte", ["Isa"] = "Sve2", ["Method"] = "AddPairwise", ["RetVectorType"] = 
"Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwise.template", new Dictionary { ["TestName"] = "Sve2AddPairwise_short", ["Isa"] = "Sve2", ["Method"] = "AddPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwise.template", new Dictionary { ["TestName"] = "Sve2AddPairwise_int", ["Isa"] = "Sve2", ["Method"] = "AddPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwise.template", new Dictionary { ["TestName"] = "Sve2AddPairwise_long", ["Isa"] = "Sve2", ["Method"] = "AddPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwise.template", new Dictionary { ["TestName"] = "Sve2AddPairwise_byte", ["Isa"] = "Sve2", ["Method"] = "AddPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwise.template", new Dictionary { ["TestName"] = "Sve2AddPairwise_ushort", ["Isa"] = "Sve2", ["Method"] = "AddPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwise.template", new Dictionary { ["TestName"] = "Sve2AddPairwise_uint", ["Isa"] = "Sve2", ["Method"] = "AddPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwise.template", new Dictionary { ["TestName"] = "Sve2AddPairwise_ulong", ["Isa"] = "Sve2", ["Method"] = "AddPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwiseWidening.template", new Dictionary { ["TestName"] = "Sve2AddPairwiseWidening_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "AddPairwiseWidening", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwiseWidening.template", new Dictionary { ["TestName"] = "Sve2AddPairwiseWidening_int_short", ["Isa"] = "Sve2", ["Method"] = "AddPairwiseWidening", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwiseWidening.template", new Dictionary { ["TestName"] = "Sve2AddPairwiseWidening_long_int", ["Isa"] = "Sve2", ["Method"] = "AddPairwiseWidening", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", 
["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwiseWidening.template", new Dictionary { ["TestName"] = "Sve2AddPairwiseWidening_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "AddPairwiseWidening", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwiseWidening.template", new Dictionary { ["TestName"] = "Sve2AddPairwiseWidening_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "AddPairwiseWidening", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AddPairwiseWidening.template", new Dictionary { ["TestName"] = "Sve2AddPairwiseWidening_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "AddPairwiseWidening", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturate.template", new Dictionary { ["TestName"] = "Sve2AddSaturate_sbyte", ["Isa"] = "Sve2", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturate.template", new Dictionary { ["TestName"] = "Sve2AddSaturate_short", ["Isa"] = "Sve2", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturate.template", new Dictionary { ["TestName"] = "Sve2AddSaturate_int", ["Isa"] = "Sve2", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturate.template", new Dictionary { ["TestName"] = "Sve2AddSaturate_long", ["Isa"] = "Sve2", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturate.template", new Dictionary { ["TestName"] = "Sve2AddSaturate_byte", ["Isa"] = "Sve2", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturate.template", new Dictionary { ["TestName"] = "Sve2AddSaturate_ushort", ["Isa"] = "Sve2", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturate.template", new Dictionary { ["TestName"] = "Sve2AddSaturate_uint", ["Isa"] = "Sve2", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), 
+ ("Sve2AddSaturate.template", new Dictionary { ["TestName"] = "Sve2AddSaturate_ulong", ["Isa"] = "Sve2", ["Method"] = "AddSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturateWithSignedAddend.template",new Dictionary {["TestName"] = "Sve2AddSaturateWithSignedAddend_byte_sbyte", ["Isa"] = "Sve2", ["Method"] = "AddSaturateWithSignedAddend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturateWithSignedAddend.template",new Dictionary {["TestName"] = "Sve2AddSaturateWithSignedAddend_ushort_short", ["Isa"] = "Sve2", ["Method"] = "AddSaturateWithSignedAddend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturateWithSignedAddend.template",new Dictionary {["TestName"] = "Sve2AddSaturateWithSignedAddend_uint_int", ["Isa"] = "Sve2", ["Method"] = "AddSaturateWithSignedAddend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturateWithSignedAddend.template",new Dictionary {["TestName"] = "Sve2AddSaturateWithSignedAddend_ulong_long", ["Isa"] = "Sve2", ["Method"] = "AddSaturateWithSignedAddend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturateWithUnsignedAddend.template",new Dictionary {["TestName"] = "Sve2AddSaturateWithUnsignedAddend_sbyte_byte", ["Isa"] = "Sve2", ["Method"] = "AddSaturateWithUnsignedAddend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturateWithUnsignedAddend.template",new Dictionary {["TestName"] = "Sve2AddSaturateWithUnsignedAddend_short_ushort", ["Isa"] = "Sve2", ["Method"] = "AddSaturateWithUnsignedAddend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturateWithUnsignedAddend.template",new Dictionary {["TestName"] = "Sve2AddSaturateWithUnsignedAddend_int_uint", ["Isa"] = "Sve2", ["Method"] = "AddSaturateWithUnsignedAddend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AddSaturateWithUnsignedAddend.template",new Dictionary {["TestName"] = "Sve2AddSaturateWithUnsignedAddend_long_ulong", ["Isa"] = "Sve2", ["Method"] = "AddSaturateWithUnsignedAddend", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideLower.template", new Dictionary { ["TestName"] = 
"Sve2AddWideLower_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "AddWideLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideLower.template", new Dictionary { ["TestName"] = "Sve2AddWideLower_int_short", ["Isa"] = "Sve2", ["Method"] = "AddWideLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideLower.template", new Dictionary { ["TestName"] = "Sve2AddWideLower_long_int", ["Isa"] = "Sve2", ["Method"] = "AddWideLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideLower.template", new Dictionary { ["TestName"] = "Sve2AddWideLower_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "AddWideLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideLower.template", new Dictionary { ["TestName"] = "Sve2AddWideLower_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "AddWideLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideLower.template", new Dictionary { ["TestName"] = "Sve2AddWideLower_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "AddWideLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideUpper.template", new Dictionary { ["TestName"] = "Sve2AddWideUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "AddWideUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideUpper.template", new Dictionary { ["TestName"] = "Sve2AddWideUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "AddWideUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideUpper.template", new Dictionary { ["TestName"] = "Sve2AddWideUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "AddWideUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideUpper.template", new Dictionary { ["TestName"] = "Sve2AddWideUpper_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "AddWideUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideUpper.template", new Dictionary { ["TestName"] = "Sve2AddWideUpper_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "AddWideUpper", ["RetVectorType"] 
= "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideUpper.template", new Dictionary { ["TestName"] = "Sve2AddWideUpper_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "AddWideUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningLower.template", new Dictionary { ["TestName"] = "Sve2AddWideningLower_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "AddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningLower.template", new Dictionary { ["TestName"] = "Sve2AddWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "AddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningLower.template", new Dictionary { ["TestName"] = "Sve2AddWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "AddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningLower.template", new Dictionary { ["TestName"] = "Sve2AddWideningLower_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "AddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningLower.template", new Dictionary { ["TestName"] = "Sve2AddWideningLower_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "AddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningLower.template", new Dictionary { ["TestName"] = "Sve2AddWideningLower_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "AddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningLowerUpper.template",new Dictionary { ["TestName"] = "Sve2AddWideningLowerUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "AddWideningLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningLowerUpper.template",new Dictionary { ["TestName"] = "Sve2AddWideningLowerUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "AddWideningLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningLowerUpper.template",new Dictionary { ["TestName"] = "Sve2AddWideningLowerUpper_long_int", ["Isa"] = "Sve2", 
["Method"] = "AddWideningLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningUpper.template", new Dictionary { ["TestName"] = "Sve2AddWideningUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "AddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningUpper.template", new Dictionary { ["TestName"] = "Sve2AddWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "AddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningUpper.template", new Dictionary { ["TestName"] = "Sve2AddWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "AddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningUpper.template", new Dictionary { ["TestName"] = "Sve2AddWideningUpper_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "AddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningUpper.template", new Dictionary { ["TestName"] = "Sve2AddWideningUpper_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "AddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2AddWideningUpper.template", new Dictionary { ["TestName"] = "Sve2AddWideningUpper_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "AddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2DotProductComplex.template", new Dictionary { ["TestName"] = "Sve2DotProductComplex_int_sbyte", ["Isa"] = "Sve2", ["Method"] = "DotProductComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2DotProductComplex.template", new Dictionary { ["TestName"] = "Sve2DotProductComplex_long_short", ["Isa"] = "Sve2", ["Method"] = "DotProductComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2DotProductComplex.template", new Dictionary { ["TestName"] = "Sve2DotProductComplex_int_sbyte", ["Isa"] = "Sve2", ["Method"] = "DotProductComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", 
["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte",["Op4BaseType"] = "UInt64",["Op5BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2DotProductComplex.template", new Dictionary { ["TestName"] = "Sve2DotProductComplex_long_short", ["Isa"] = "Sve2", ["Method"] = "DotProductComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "UInt64",["Op5BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2HalvingAdd.template", new Dictionary { ["TestName"] = "Sve2HalvingAdd_sbyte", ["Isa"] = "Sve2", ["Method"] = "HalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingAdd.template", new Dictionary { ["TestName"] = "Sve2HalvingAdd_short", ["Isa"] = "Sve2", ["Method"] = "HalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingAdd.template", new Dictionary { ["TestName"] = "Sve2HalvingAdd_int", ["Isa"] = "Sve2", ["Method"] = "HalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingAdd.template", new Dictionary { ["TestName"] = "Sve2HalvingAdd_long", ["Isa"] = "Sve2", ["Method"] = "HalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingAdd.template", new Dictionary { ["TestName"] = "Sve2HalvingAdd_byte", ["Isa"] = "Sve2", ["Method"] = "HalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingAdd.template", new Dictionary { ["TestName"] = "Sve2HalvingAdd_ushort", ["Isa"] = "Sve2", ["Method"] = "HalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingAdd.template", new Dictionary { ["TestName"] = "Sve2HalvingAdd_uint", ["Isa"] = "Sve2", ["Method"] = "HalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingAdd.template", new Dictionary { ["TestName"] = "Sve2HalvingAdd_ulong", ["Isa"] = "Sve2", ["Method"] = "HalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtract.template", new Dictionary { ["TestName"] = "Sve2HalvingSubtract_sbyte", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtract.template", new Dictionary { ["TestName"] = "Sve2HalvingSubtract_short", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtract.template", new Dictionary { ["TestName"] = "Sve2HalvingSubtract_int", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtract.template", new Dictionary { ["TestName"] = "Sve2HalvingSubtract_long", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtract.template", new Dictionary { ["TestName"] = "Sve2HalvingSubtract_byte", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtract.template", new Dictionary { ["TestName"] = "Sve2HalvingSubtract_ushort", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtract.template", new Dictionary { ["TestName"] = "Sve2HalvingSubtract_uint", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtract.template", new Dictionary { ["TestName"] = "Sve2HalvingSubtract_ulong", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtractReversed.template",new Dictionary {["TestName"] = "Sve2HalvingSubtractReversed_sbyte", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtractReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtractReversed.template",new Dictionary {["TestName"] = "Sve2HalvingSubtractReversed_short", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtractReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtractReversed.template",new Dictionary {["TestName"] = "Sve2HalvingSubtractReversed_int", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtractReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = 
"Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtractReversed.template",new Dictionary {["TestName"] = "Sve2HalvingSubtractReversed_long", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtractReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtractReversed.template",new Dictionary {["TestName"] = "Sve2HalvingSubtractReversed_byte", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtractReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtractReversed.template",new Dictionary {["TestName"] = "Sve2HalvingSubtractReversed_ushort", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtractReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtractReversed.template",new Dictionary {["TestName"] = "Sve2HalvingSubtractReversed_uint", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtractReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2HalvingSubtractReversed.template",new Dictionary {["TestName"] = "Sve2HalvingSubtractReversed_ulong", ["Isa"] = "Sve2", ["Method"] = "HalvingSubtractReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2MaxNumberPairwise.template", new Dictionary { ["TestName"] = "Sve2MaxNumberPairwise_float", ["Isa"] = "Sve2", ["Method"] = "MaxNumberPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("Sve2MaxNumberPairwise.template", new Dictionary { ["TestName"] = "Sve2MaxNumberPairwise_double", ["Isa"] = "Sve2", ["Method"] = "MaxNumberPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("Sve2MaxPairwise.template", new Dictionary { ["TestName"] = "Sve2MaxPairwise_float", ["Isa"] = "Sve2", ["Method"] = "MaxPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("Sve2MaxPairwise.template", new Dictionary { ["TestName"] = "Sve2MaxPairwise_double", ["Isa"] = "Sve2", ["Method"] = "MaxPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("Sve2MaxPairwise.template", new Dictionary { ["TestName"] = "Sve2MaxPairwise_sbyte", ["Isa"] = "Sve2", ["Method"] = "MaxPairwise", 
["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2MaxPairwise.template", new Dictionary { ["TestName"] = "Sve2MaxPairwise_short", ["Isa"] = "Sve2", ["Method"] = "MaxPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2MaxPairwise.template", new Dictionary { ["TestName"] = "Sve2MaxPairwise_int", ["Isa"] = "Sve2", ["Method"] = "MaxPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2MaxPairwise.template", new Dictionary { ["TestName"] = "Sve2MaxPairwise_long", ["Isa"] = "Sve2", ["Method"] = "MaxPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2MaxPairwise.template", new Dictionary { ["TestName"] = "Sve2MaxPairwise_byte", ["Isa"] = "Sve2", ["Method"] = "MaxPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MaxPairwise.template", new Dictionary { ["TestName"] = "Sve2MaxPairwise_ushort", ["Isa"] = "Sve2", ["Method"] = "MaxPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2MaxPairwise.template", new Dictionary { ["TestName"] = "Sve2MaxPairwise_uint", ["Isa"] = "Sve2", ["Method"] = "MaxPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2MaxPairwise.template", new Dictionary { ["TestName"] = "Sve2MaxPairwise_ulong", ["Isa"] = "Sve2", ["Method"] = "MaxPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2MinNumberPairwise.template", new Dictionary { ["TestName"] = "Sve2MinNumberPairwise_float", ["Isa"] = "Sve2", ["Method"] = "MinNumberPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("Sve2MinNumberPairwise.template", new Dictionary { ["TestName"] = "Sve2MinNumberPairwise_double", ["Isa"] = "Sve2", ["Method"] = "MinNumberPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("Sve2MinPairwise.template", new Dictionary { ["TestName"] = "Sve2MinPairwise_float", ["Isa"] = "Sve2", ["Method"] = "MinPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = 
"Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("Sve2MinPairwise.template", new Dictionary { ["TestName"] = "Sve2MinPairwise_double", ["Isa"] = "Sve2", ["Method"] = "MinPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("Sve2MinPairwise.template", new Dictionary { ["TestName"] = "Sve2MinPairwise_sbyte", ["Isa"] = "Sve2", ["Method"] = "MinPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2MinPairwise.template", new Dictionary { ["TestName"] = "Sve2MinPairwise_short", ["Isa"] = "Sve2", ["Method"] = "MinPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2MinPairwise.template", new Dictionary { ["TestName"] = "Sve2MinPairwise_int", ["Isa"] = "Sve2", ["Method"] = "MinPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2MinPairwise.template", new Dictionary { ["TestName"] = "Sve2MinPairwise_long", ["Isa"] = "Sve2", ["Method"] = "MinPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2MinPairwise.template", new Dictionary { ["TestName"] = "Sve2MinPairwise_byte", ["Isa"] = "Sve2", ["Method"] = "MinPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MinPairwise.template", new Dictionary { ["TestName"] = "Sve2MinPairwise_ushort", ["Isa"] = "Sve2", ["Method"] = "MinPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2MinPairwise.template", new Dictionary { ["TestName"] = "Sve2MinPairwise_uint", ["Isa"] = "Sve2", ["Method"] = "MinPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2MinPairwise.template", new Dictionary { ["TestName"] = "Sve2MinPairwise_ulong", ["Isa"] = "Sve2", ["Method"] = "MinPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyAddBySelectedScalar_short", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = 
"Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyAddBySelectedScalar_int", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyAddBySelectedScalar_long", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyAddBySelectedScalar_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyAddBySelectedScalar_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyAddBySelectedScalar_ulong", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningLower_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = 
"Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningLower_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningLower_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningLower_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningLower_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningLower_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = 
"Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningUpper_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningUpper_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningUpper_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningUpper_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = 
"Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplyAddWideningUpper_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyBySelectedScalar_short", ["Isa"] = "Sve2", ["Method"] = "MultiplyBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyBySelectedScalar_int", ["Isa"] = "Sve2", ["Method"] = "MultiplyBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyBySelectedScalar_long", ["Isa"] = "Sve2", ["Method"] = "MultiplyBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyBySelectedScalar_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplyBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyBySelectedScalar_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplyBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyBySelectedScalar_ulong", ["Isa"] = "Sve2", ["Method"] = "MultiplyBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractBySelectedScalar_short", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "Byte",["LargestVectorSize"] = 
"8",}), + ("Sve2MultiplySubtractBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractBySelectedScalar_int", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractBySelectedScalar_long", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractBySelectedScalar_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractBySelectedScalar_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractBySelectedScalar_ulong", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningLower_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = 
"Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningLower_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningLower_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningLower_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningLower_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningLower_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningUpper.template",new Dictionary 
{["TestName"] = "Sve2MultiplySubtractWideningUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningUpper_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningUpper_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningUpper_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = 
"Int32",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningUpper_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "Sve2MultiplySubtractWideningUpper_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningLower.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningLower_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningLower.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningLower.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningLower.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningLower_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningLower.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningLower_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningLower.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningLower_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningLower.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", 
["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningLower.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningLower.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningLower_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningLower.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningLower_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningUpper.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningUpper.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningUpper.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningUpper.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningUpper_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningUpper.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningUpper_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningUpper.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningUpper_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + 
("Sve2MultiplyWideningUpper.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningUpper.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningUpper.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningUpper_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyWideningUpper.template",new Dictionary { ["TestName"] = "Sve2MultiplyWideningUpper_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2PolynomialMultiply.template", new Dictionary { ["TestName"] = "Sve2PolynomialMultiply_byte", ["Isa"] = "Sve2", ["Method"] = "PolynomialMultiply", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2PolynomialMultiplyWideningLower.template",new Dictionary {["TestName"] = "Sve2PolynomialMultiplyWideningLower_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "PolynomialMultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2PolynomialMultiplyWideningLower.template",new Dictionary {["TestName"] = "Sve2PolynomialMultiplyWideningLower_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "PolynomialMultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2PolynomialMultiplyWideningLower.template",new Dictionary {["TestName"] = "Sve2PolynomialMultiplyWideningLower_byte", ["Isa"] = "Sve2", ["Method"] = "PolynomialMultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2PolynomialMultiplyWideningLower.template",new Dictionary {["TestName"] = "Sve2PolynomialMultiplyWideningLower_uint", ["Isa"] = "Sve2", ["Method"] = "PolynomialMultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + 
("Sve2PolynomialMultiplyWideningUpper.template",new Dictionary {["TestName"] = "Sve2PolynomialMultiplyWideningUpper_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "PolynomialMultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2PolynomialMultiplyWideningUpper.template",new Dictionary {["TestName"] = "Sve2PolynomialMultiplyWideningUpper_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "PolynomialMultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2PolynomialMultiplyWideningUpper.template",new Dictionary {["TestName"] = "Sve2PolynomialMultiplyWideningUpper_byte", ["Isa"] = "Sve2", ["Method"] = "PolynomialMultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2PolynomialMultiplyWideningUpper.template",new Dictionary {["TestName"] = "Sve2PolynomialMultiplyWideningUpper_uint", ["Isa"] = "Sve2", ["Method"] = "PolynomialMultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingAddHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2RoundingAddHighNarowingLower_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "RoundingAddHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingAddHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2RoundingAddHighNarowingLower_short_int", ["Isa"] = "Sve2", ["Method"] = "RoundingAddHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingAddHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2RoundingAddHighNarowingLower_int_long", ["Isa"] = "Sve2", ["Method"] = "RoundingAddHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingAddHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2RoundingAddHighNarowingLower_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "RoundingAddHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingAddHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2RoundingAddHighNarowingLower_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "RoundingAddHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + 
("Sve2RoundingAddHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2RoundingAddHighNarowingLower_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "RoundingAddHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingAddHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2RoundingAddHighNarowingUpper_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "RoundingAddHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingAddHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2RoundingAddHighNarowingUpper_short_int", ["Isa"] = "Sve2", ["Method"] = "RoundingAddHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingAddHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2RoundingAddHighNarowingUpper_int_long", ["Isa"] = "Sve2", ["Method"] = "RoundingAddHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingAddHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2RoundingAddHighNarowingUpper_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "RoundingAddHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingAddHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2RoundingAddHighNarowingUpper_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "RoundingAddHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingAddHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2RoundingAddHighNarowingUpper_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "RoundingAddHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingHalvingAdd.template", new Dictionary { ["TestName"] = "Sve2RoundingHalvingAdd_sbyte", ["Isa"] = "Sve2", ["Method"] = "RoundingHalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingHalvingAdd.template", new Dictionary { ["TestName"] = "Sve2RoundingHalvingAdd_short", 
["Isa"] = "Sve2", ["Method"] = "RoundingHalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingHalvingAdd.template", new Dictionary { ["TestName"] = "Sve2RoundingHalvingAdd_int", ["Isa"] = "Sve2", ["Method"] = "RoundingHalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingHalvingAdd.template", new Dictionary { ["TestName"] = "Sve2RoundingHalvingAdd_long", ["Isa"] = "Sve2", ["Method"] = "RoundingHalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingHalvingAdd.template", new Dictionary { ["TestName"] = "Sve2RoundingHalvingAdd_byte", ["Isa"] = "Sve2", ["Method"] = "RoundingHalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingHalvingAdd.template", new Dictionary { ["TestName"] = "Sve2RoundingHalvingAdd_ushort", ["Isa"] = "Sve2", ["Method"] = "RoundingHalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingHalvingAdd.template", new Dictionary { ["TestName"] = "Sve2RoundingHalvingAdd_uint", ["Isa"] = "Sve2", ["Method"] = "RoundingHalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingHalvingAdd.template", new Dictionary { ["TestName"] = "Sve2RoundingHalvingAdd_ulong", ["Isa"] = "Sve2", ["Method"] = "RoundingHalvingAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingSubtractHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2RoundingSubtractHighNarowingLower_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "RoundingSubtractHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingSubtractHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2RoundingSubtractHighNarowingLower_short_int", ["Isa"] = "Sve2", ["Method"] = "RoundingSubtractHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingSubtractHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2RoundingSubtractHighNarowingLower_int_long", ["Isa"] = "Sve2", ["Method"] = "RoundingSubtractHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = 
"Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingSubtractHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2RoundingSubtractHighNarowingLower_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "RoundingSubtractHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingSubtractHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2RoundingSubtractHighNarowingLower_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "RoundingSubtractHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingSubtractHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2RoundingSubtractHighNarowingLower_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "RoundingSubtractHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingSubtractHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2RoundingSubtractHighNarowingUpper_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "RoundingSubtractHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingSubtractHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2RoundingSubtractHighNarowingUpper_short_int", ["Isa"] = "Sve2", ["Method"] = "RoundingSubtractHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingSubtractHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2RoundingSubtractHighNarowingUpper_int_long", ["Isa"] = "Sve2", ["Method"] = "RoundingSubtractHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingSubtractHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2RoundingSubtractHighNarowingUpper_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "RoundingSubtractHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingSubtractHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2RoundingSubtractHighNarowingUpper_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "RoundingSubtractHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", 
["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2RoundingSubtractHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2RoundingSubtractHighNarowingUpper_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "RoundingSubtractHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingAbs.template", new Dictionary { ["TestName"] = "Sve2SaturatingAbs_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingAbs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingAbs.template", new Dictionary { ["TestName"] = "Sve2SaturatingAbs_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingAbs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingAbs.template", new Dictionary { ["TestName"] = "Sve2SaturatingAbs_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingAbs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingAbs.template", new Dictionary { ["TestName"] = "Sve2SaturatingAbs_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingAbs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyAddWideningLower_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyAddWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyAddWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyAddWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] 
= "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyAddWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyAddWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyAddWideningLowerUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyAddWideningLowerUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyAddWideningLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyAddWideningLowerUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyAddWideningLowerUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyAddWideningLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyAddWideningLowerUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyAddWideningLowerUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyAddWideningLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyAddWideningUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyAddWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyAddWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = 
"Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyAddWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyAddWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyHigh_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyHigh_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyHigh_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyHigh_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyHigh_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyHigh_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyHigh.template",new Dictionary {["TestName"] = 
"Sve2SaturatingDoublingMultiplyHigh_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplySubtractWideningLower_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplySubtractWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplySubtractWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplySubtractWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplySubtractWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplySubtractWideningLowerUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplySubtractWideningLowerUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplySubtractWideningLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplySubtractWideningLowerUpper.template",new Dictionary {["TestName"] = 
"Sve2SaturatingDoublingMultiplySubtractWideningLowerUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplySubtractWideningLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplySubtractWideningLowerUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplySubtractWideningLowerUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplySubtractWideningLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplySubtractWideningUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplySubtractWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplySubtractWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplySubtractWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplySubtractWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + 
("Sve2SaturatingDoublingMultiplyWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyWideningLower_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyWideningLower.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyWideningUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyWideningUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyWideningUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyWideningUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyWideningUpper.template",new Dictionary {["TestName"] = 
"Sve2SaturatingDoublingMultiplyWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingDoublingMultiplyWideningUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingDoublingMultiplyWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingDoublingMultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingNegate.template", new Dictionary { ["TestName"] = "Sve2SaturatingNegate_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingNegate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingNegate.template", new Dictionary { ["TestName"] = "Sve2SaturatingNegate_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingNegate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingNegate.template", new Dictionary { ["TestName"] = "Sve2SaturatingNegate_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingNegate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingNegate.template", new Dictionary { ["TestName"] = "Sve2SaturatingNegate_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingNegate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplyAddHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplyAddHigh_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplyAddHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplyAddHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplyAddHigh_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplyAddHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplyAddHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplyAddHigh_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplyAddHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplyAddHigh.template",new Dictionary {["TestName"] = 
"Sve2SaturatingRoundingDoublingMultiplyAddHigh_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplyAddHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplyAddHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplyAddHigh_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplyAddHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplyAddHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplyAddHigh_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplyAddHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplyAddHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplyAddHigh_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplyAddHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplyHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplyHigh_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplyHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplyHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplyHigh_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplyHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplyHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplyHigh_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplyHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplyHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplyHigh_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplyHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", 
["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplyHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplyHigh_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplyHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplyHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplyHigh_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplyHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplyHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplyHigh_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplyHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplySubtractHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplySubtractHigh_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplySubtractHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplySubtractHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplySubtractHigh_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplySubtractHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplySubtractHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplySubtractHigh_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplySubtractHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplySubtractHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplySubtractHigh_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplySubtractHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplySubtractHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplySubtractHigh_short", ["Isa"] = "Sve2", ["Method"] = 
"SaturatingRoundingDoublingMultiplySubtractHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplySubtractHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplySubtractHigh_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplySubtractHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingMultiplySubtractHigh.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingMultiplySubtractHigh_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingMultiplySubtractHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("Sve2SubtractHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2SubtractHighNarowingLower_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "SubtractHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2SubtractHighNarowingLower_short_int", ["Isa"] = "Sve2", ["Method"] = "SubtractHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2SubtractHighNarowingLower_int_long", ["Isa"] = "Sve2", ["Method"] = "SubtractHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2SubtractHighNarowingLower_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "SubtractHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2SubtractHighNarowingLower_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "SubtractHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractHighNarowingLower.template",new Dictionary {["TestName"] = "Sve2SubtractHighNarowingLower_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "SubtractHighNarowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = 
"Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2SubtractHighNarowingUpper_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "SubtractHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2SubtractHighNarowingUpper_short_int", ["Isa"] = "Sve2", ["Method"] = "SubtractHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2SubtractHighNarowingUpper_int_long", ["Isa"] = "Sve2", ["Method"] = "SubtractHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2SubtractHighNarowingUpper_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "SubtractHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2SubtractHighNarowingUpper_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "SubtractHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractHighNarowingUpper.template",new Dictionary {["TestName"] = "Sve2SubtractHighNarowingUpper_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "SubtractHighNarowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturate.template", new Dictionary { ["TestName"] = "Sve2SubtractSaturate_sbyte", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturate.template", new Dictionary { ["TestName"] = "Sve2SubtractSaturate_short", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturate.template", new Dictionary { ["TestName"] = 
"Sve2SubtractSaturate_int", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturate.template", new Dictionary { ["TestName"] = "Sve2SubtractSaturate_long", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturate.template", new Dictionary { ["TestName"] = "Sve2SubtractSaturate_byte", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturate.template", new Dictionary { ["TestName"] = "Sve2SubtractSaturate_ushort", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturate.template", new Dictionary { ["TestName"] = "Sve2SubtractSaturate_uint", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturate.template", new Dictionary { ["TestName"] = "Sve2SubtractSaturate_ulong", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturateReversed.template",new Dictionary {["TestName"] = "Sve2SubtractSaturateReversed_sbyte", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturateReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturateReversed.template",new Dictionary {["TestName"] = "Sve2SubtractSaturateReversed_short", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturateReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturateReversed.template",new Dictionary {["TestName"] = "Sve2SubtractSaturateReversed_int", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturateReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturateReversed.template",new Dictionary {["TestName"] = "Sve2SubtractSaturateReversed_long", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturateReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + 
("Sve2SubtractSaturateReversed.template",new Dictionary {["TestName"] = "Sve2SubtractSaturateReversed_byte", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturateReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturateReversed.template",new Dictionary {["TestName"] = "Sve2SubtractSaturateReversed_ushort", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturateReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturateReversed.template",new Dictionary {["TestName"] = "Sve2SubtractSaturateReversed_uint", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturateReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractSaturateReversed.template",new Dictionary {["TestName"] = "Sve2SubtractSaturateReversed_ulong", ["Isa"] = "Sve2", ["Method"] = "SubtractSaturateReversed", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideLower.template", new Dictionary { ["TestName"] = "Sve2SubtractWideLower_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "SubtractWideLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideLower.template", new Dictionary { ["TestName"] = "Sve2SubtractWideLower_int_short", ["Isa"] = "Sve2", ["Method"] = "SubtractWideLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideLower.template", new Dictionary { ["TestName"] = "Sve2SubtractWideLower_long_int", ["Isa"] = "Sve2", ["Method"] = "SubtractWideLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideLower.template", new Dictionary { ["TestName"] = "Sve2SubtractWideLower_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "SubtractWideLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideLower.template", new Dictionary { ["TestName"] = "Sve2SubtractWideLower_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "SubtractWideLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideLower.template", new Dictionary { ["TestName"] = "Sve2SubtractWideLower_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "SubtractWideLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", 
["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideUpper.template", new Dictionary { ["TestName"] = "Sve2SubtractWideUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "SubtractWideUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideUpper.template", new Dictionary { ["TestName"] = "Sve2SubtractWideUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "SubtractWideUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideUpper.template", new Dictionary { ["TestName"] = "Sve2SubtractWideUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "SubtractWideUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideUpper.template", new Dictionary { ["TestName"] = "Sve2SubtractWideUpper_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "SubtractWideUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideUpper.template", new Dictionary { ["TestName"] = "Sve2SubtractWideUpper_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "SubtractWideUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideUpper.template", new Dictionary { ["TestName"] = "Sve2SubtractWideUpper_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "SubtractWideUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningLower.template",new Dictionary { ["TestName"] = "Sve2SubtractWideningLower_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningLower.template",new Dictionary { ["TestName"] = "Sve2SubtractWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningLower.template",new Dictionary { ["TestName"] = "Sve2SubtractWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningLower.template",new Dictionary { ["TestName"] = "Sve2SubtractWideningLower_ushort_byte", ["Isa"] = "Sve2", ["Method"] = 
"SubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningLower.template",new Dictionary { ["TestName"] = "Sve2SubtractWideningLower_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningLower.template",new Dictionary { ["TestName"] = "Sve2SubtractWideningLower_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningLowerUpper.template",new Dictionary {["TestName"] = "Sve2SubtractWideningLowerUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningLowerUpper.template",new Dictionary {["TestName"] = "Sve2SubtractWideningLowerUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningLowerUpper.template",new Dictionary {["TestName"] = "Sve2SubtractWideningLowerUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningUpper.template",new Dictionary { ["TestName"] = "Sve2SubtractWideningUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningUpper.template",new Dictionary { ["TestName"] = "Sve2SubtractWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningUpper.template",new Dictionary { ["TestName"] = "Sve2SubtractWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningUpper.template",new Dictionary { ["TestName"] = "Sve2SubtractWideningUpper_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", 
["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningUpper.template",new Dictionary { ["TestName"] = "Sve2SubtractWideningUpper_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningUpper.template",new Dictionary { ["TestName"] = "Sve2SubtractWideningUpper_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningUpperLower.template",new Dictionary {["TestName"] = "Sve2SubtractWideningUpperLower_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningUpperLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningUpperLower.template",new Dictionary {["TestName"] = "Sve2SubtractWideningUpperLower_int_short", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningUpperLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWideningUpperLower.template",new Dictionary {["TestName"] = "Sve2SubtractWideningUpperLower_long_int", ["Isa"] = "Sve2", ["Method"] = "SubtractWideningUpperLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWithBorrowWideningLower.template",new Dictionary {["TestName"] = "Sve2SubtractWithBorrowWideningLower_uint", ["Isa"] = "Sve2", ["Method"] = "SubtractWithBorrowWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWithBorrowWideningLower.template",new Dictionary {["TestName"] = "Sve2SubtractWithBorrowWideningLower_ulong", ["Isa"] = "Sve2", ["Method"] = "SubtractWithBorrowWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWithBorrowWideningUpper.template",new Dictionary {["TestName"] = "Sve2SubtractWithBorrowWideningUpper_uint", ["Isa"] = "Sve2", ["Method"] = "SubtractWithBorrowWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2SubtractWithBorrowWideningUpper.template",new Dictionary {["TestName"] = "Sve2SubtractWithBorrowWideningUpper_ulong", ["Isa"] = "Sve2", ["Method"] = "SubtractWithBorrowWideningUpper", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + + +// Sve2 mask + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_byte", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_byte", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_byte", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_byte", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_ushort", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_ushort", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_ushort", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_ushort", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_uint", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_uint", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = 
"Sve2CreateWhileGreaterThanMask_uint", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_uint", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_ulong", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_ulong", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_ulong", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanMask_ulong", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_byte", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_byte", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_byte", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_byte", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_ushort", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = 
"8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_ushort", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_ushort", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_ushort", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_uint", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_uint", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_uint", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_uint", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_ulong", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_ulong", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileGreaterThanOrEqualMask_ulong", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileGreaterThanOrEqualMask.template",new Dictionary {["TestName"] = 
"Sve2CreateWhileGreaterThanOrEqualMask_ulong", ["Isa"] = "Sve2", ["Method"] = "CreateWhileGreaterThanOrEqualMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileReadAfterWriteMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileReadAfterWriteMask_float", ["Isa"] = "Sve2", ["Method"] = "CreateWhileReadAfterWriteMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileReadAfterWriteMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileReadAfterWriteMask_double", ["Isa"] = "Sve2", ["Method"] = "CreateWhileReadAfterWriteMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileReadAfterWriteMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileReadAfterWriteMask_sbyte", ["Isa"] = "Sve2", ["Method"] = "CreateWhileReadAfterWriteMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileReadAfterWriteMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileReadAfterWriteMask_short", ["Isa"] = "Sve2", ["Method"] = "CreateWhileReadAfterWriteMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileReadAfterWriteMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileReadAfterWriteMask_int", ["Isa"] = "Sve2", ["Method"] = "CreateWhileReadAfterWriteMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileReadAfterWriteMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileReadAfterWriteMask_long", ["Isa"] = "Sve2", ["Method"] = "CreateWhileReadAfterWriteMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileReadAfterWriteMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileReadAfterWriteMask_byte", ["Isa"] = "Sve2", ["Method"] = "CreateWhileReadAfterWriteMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileReadAfterWriteMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileReadAfterWriteMask_ushort", ["Isa"] = "Sve2", ["Method"] = "CreateWhileReadAfterWriteMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileReadAfterWriteMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileReadAfterWriteMask_uint", ["Isa"] = "Sve2", ["Method"] = "CreateWhileReadAfterWriteMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileReadAfterWriteMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileReadAfterWriteMask_ulong", ["Isa"] = "Sve2", ["Method"] = "CreateWhileReadAfterWriteMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = 
"8",}), + ("Sve2CreateWhileWriteAfterReadMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileWriteAfterReadMask_float", ["Isa"] = "Sve2", ["Method"] = "CreateWhileWriteAfterReadMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileWriteAfterReadMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileWriteAfterReadMask_double", ["Isa"] = "Sve2", ["Method"] = "CreateWhileWriteAfterReadMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileWriteAfterReadMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileWriteAfterReadMask_sbyte", ["Isa"] = "Sve2", ["Method"] = "CreateWhileWriteAfterReadMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileWriteAfterReadMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileWriteAfterReadMask_short", ["Isa"] = "Sve2", ["Method"] = "CreateWhileWriteAfterReadMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileWriteAfterReadMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileWriteAfterReadMask_int", ["Isa"] = "Sve2", ["Method"] = "CreateWhileWriteAfterReadMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileWriteAfterReadMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileWriteAfterReadMask_long", ["Isa"] = "Sve2", ["Method"] = "CreateWhileWriteAfterReadMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileWriteAfterReadMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileWriteAfterReadMask_byte", ["Isa"] = "Sve2", ["Method"] = "CreateWhileWriteAfterReadMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileWriteAfterReadMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileWriteAfterReadMask_ushort", ["Isa"] = "Sve2", ["Method"] = "CreateWhileWriteAfterReadMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileWriteAfterReadMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileWriteAfterReadMask_uint", ["Isa"] = "Sve2", ["Method"] = "CreateWhileWriteAfterReadMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2CreateWhileWriteAfterReadMask.template",new Dictionary {["TestName"] = "Sve2CreateWhileWriteAfterReadMask_ulong", ["Isa"] = "Sve2", ["Method"] = "CreateWhileWriteAfterReadMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2Match.template", new Dictionary { ["TestName"] = "Sve2Match_sbyte", ["Isa"] = "Sve2", ["Method"] = "Match", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] 
= "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2Match.template", new Dictionary { ["TestName"] = "Sve2Match_short", ["Isa"] = "Sve2", ["Method"] = "Match", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2Match.template", new Dictionary { ["TestName"] = "Sve2Match_byte", ["Isa"] = "Sve2", ["Method"] = "Match", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2Match.template", new Dictionary { ["TestName"] = "Sve2Match_ushort", ["Isa"] = "Sve2", ["Method"] = "Match", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2NoMatch.template", new Dictionary { ["TestName"] = "Sve2NoMatch_sbyte", ["Isa"] = "Sve2", ["Method"] = "NoMatch", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2NoMatch.template", new Dictionary { ["TestName"] = "Sve2NoMatch_short", ["Isa"] = "Sve2", ["Method"] = "NoMatch", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2NoMatch.template", new Dictionary { ["TestName"] = "Sve2NoMatch_byte", ["Isa"] = "Sve2", ["Method"] = "NoMatch", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2NoMatch.template", new Dictionary { ["TestName"] = "Sve2NoMatch_ushort", ["Isa"] = "Sve2", ["Method"] = "NoMatch", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractNarrowingLower.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractNarrowingLower_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractNarrowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractNarrowingLower.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractNarrowingLower_short_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractNarrowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractNarrowingLower.template",new Dictionary {["TestName"] = 
"Sve2SaturatingExtractNarrowingLower_int_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractNarrowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractNarrowingLower.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractNarrowingLower_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractNarrowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractNarrowingLower.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractNarrowingLower_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractNarrowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractNarrowingLower.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractNarrowingLower_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractNarrowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractNarrowingUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractNarrowingUpper_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractNarrowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractNarrowingUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractNarrowingUpper_short_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractNarrowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractNarrowingUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractNarrowingUpper_int_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractNarrowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractNarrowingUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractNarrowingUpper_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractNarrowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractNarrowingUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractNarrowingUpper_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractNarrowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractNarrowingUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractNarrowingUpper_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractNarrowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractUnsignedNarrowingLower.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractUnsignedNarrowingLower_byte_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractUnsignedNarrowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractUnsignedNarrowingLower.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractUnsignedNarrowingLower_ushort_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractUnsignedNarrowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractUnsignedNarrowingLower.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractUnsignedNarrowingLower_uint_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractUnsignedNarrowingLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractUnsignedNarrowingUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractUnsignedNarrowingUpper_byte_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractUnsignedNarrowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractUnsignedNarrowingUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractUnsignedNarrowingUpper_ushort_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractUnsignedNarrowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingExtractUnsignedNarrowingUpper.template",new Dictionary {["TestName"] = "Sve2SaturatingExtractUnsignedNarrowingUpper_uint_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingExtractUnsignedNarrowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + + +// Sve2 gatherloads + ("Sve2GatherVectorByteZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorByteZeroExtendNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorByteZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorByteZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorByteZeroExtendNonTemporal_uint_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorByteZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorByteZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorByteZeroExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", 
["Method"] = "GatherVectorByteZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorByteZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorByteZeroExtendNonTemporal_ulong_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorByteZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorByteZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorByteZeroExtendNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorByteZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorByteZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorByteZeroExtendNonTemporal_uint_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorByteZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorByteZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorByteZeroExtendNonTemporal_long_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorByteZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorByteZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorByteZeroExtendNonTemporal_ulong_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorByteZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorByteZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorByteZeroExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorByteZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorByteZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorByteZeroExtendNonTemporal_ulong_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorByteZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt16SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt16SignExtendNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = 
"GatherVectorInt16SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt16SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt16SignExtendNonTemporal_uint_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt16SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt16SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt16SignExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt16SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt16SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt16SignExtendNonTemporal_ulong_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt16SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt16SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt16SignExtendNonTemporal_long_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt16SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt16SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt16SignExtendNonTemporal_ulong_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt16SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt16SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt16SignExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt16SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt16SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt16SignExtendNonTemporal_ulong_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt16SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt16WithByteOffsetsSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt16WithByteOffsetsSignExtendNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = 
"GatherVectorInt16WithByteOffsetsSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt16WithByteOffsetsSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt16WithByteOffsetsSignExtendNonTemporal_uint_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt16WithByteOffsetsSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt16WithByteOffsetsSignExtendNonTemporal_long_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt16WithByteOffsetsSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt16WithByteOffsetsSignExtendNonTemporal_ulong_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt16WithByteOffsetsSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt16WithByteOffsetsSignExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt16WithByteOffsetsSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt16WithByteOffsetsSignExtendNonTemporal_ulong_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt16WithByteOffsetsSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32SignExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32SignExtendNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", 
["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32SignExtendNonTemporal_ulong_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32SignExtendNonTemporal_uint_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32SignExtendNonTemporal_long_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32SignExtendNonTemporal_int_int", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32SignExtendNonTemporal_ulong_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32SignExtendNonTemporal_uint_int", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32SignExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32SignExtendNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = 
"UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32SignExtendNonTemporal_ulong_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32SignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32SignExtendNonTemporal_uint_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32SignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal_long_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal_int_int", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal_ulong_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal_uint_int", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal.template",new Dictionary {["TestName"] = 
"Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal_ulong_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorInt32WithByteOffsetsSignExtendNonTemporal_uint_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorInt32WithByteOffsetsSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_float_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_double_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", 
["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_float_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_double_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_ulong_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_double_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_double_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_ulong_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_double_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorNonTemporal_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorSByteSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorSByteSignExtendNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorSByteSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorSByteSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorSByteSignExtendNonTemporal_uint_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorSByteSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorSByteSignExtendNonTemporal.template",new Dictionary {["TestName"] = 
"Sve2GatherVectorSByteSignExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorSByteSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorSByteSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorSByteSignExtendNonTemporal_ulong_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorSByteSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorSByteSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorSByteSignExtendNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorSByteSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorSByteSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorSByteSignExtendNonTemporal_uint_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorSByteSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorSByteSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorSByteSignExtendNonTemporal_long_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorSByteSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorSByteSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorSByteSignExtendNonTemporal_ulong_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorSByteSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorSByteSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorSByteSignExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorSByteSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorSByteSignExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorSByteSignExtendNonTemporal_ulong_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorSByteSignExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal.template",new Dictionary 
{["TestName"] = "Sve2GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal_uint_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal_long_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal_ulong_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal_ulong_ulong",["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt16ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt16ZeroExtendNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt16ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt16ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt16ZeroExtendNonTemporal_uint_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt16ZeroExtendNonTemporal", 
["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt16ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt16ZeroExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt16ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt16ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt16ZeroExtendNonTemporal_ulong_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt16ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt16ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt16ZeroExtendNonTemporal_long_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt16ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt16ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt16ZeroExtendNonTemporal_ulong_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt16ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt16ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt16ZeroExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt16ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt16ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt16ZeroExtendNonTemporal_ulong_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt16ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal_long_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal_int_int", ["Isa"] = 
"Sve2", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal_ulong_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal_uint_int", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal_ulong_ulong",["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal_uint_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32ZeroExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32ZeroExtendNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32ZeroExtendNonTemporal_ulong_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32ZeroExtendNonTemporal_uint_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32ZeroExtendNonTemporal_long_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32ZeroExtendNonTemporal_int_int", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32ZeroExtendNonTemporal_ulong_long", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32ZeroExtendNonTemporal_uint_int", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32ZeroExtendNonTemporal_long_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = 
"Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32ZeroExtendNonTemporal_int_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32ZeroExtendNonTemporal_ulong_ulong", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2GatherVectorUInt32ZeroExtendNonTemporal.template",new Dictionary {["TestName"] = "Sve2GatherVectorUInt32ZeroExtendNonTemporal_uint_uint", ["Isa"] = "Sve2", ["Method"] = "GatherVectorUInt32ZeroExtendNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + + +// Sve2 fp + ("Sve2AddRotateComplex.template", new Dictionary { ["TestName"] = "Sve2AddRotateComplex_sbyte", ["Isa"] = "Sve2", ["Method"] = "AddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddRotateComplex.template", new Dictionary { ["TestName"] = "Sve2AddRotateComplex_short", ["Isa"] = "Sve2", ["Method"] = "AddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddRotateComplex.template", new Dictionary { ["TestName"] = "Sve2AddRotateComplex_int", ["Isa"] = "Sve2", ["Method"] = "AddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddRotateComplex.template", new Dictionary { ["TestName"] = "Sve2AddRotateComplex_long", ["Isa"] = "Sve2", ["Method"] = "AddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddRotateComplex.template", new Dictionary { ["TestName"] = "Sve2AddRotateComplex_byte", ["Isa"] = "Sve2", ["Method"] = "AddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddRotateComplex.template", new Dictionary { ["TestName"] = "Sve2AddRotateComplex_ushort", ["Isa"] = "Sve2", ["Method"] = 
"AddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddRotateComplex.template", new Dictionary { ["TestName"] = "Sve2AddRotateComplex_uint", ["Isa"] = "Sve2", ["Method"] = "AddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2AddRotateComplex.template", new Dictionary { ["TestName"] = "Sve2AddRotateComplex_ulong", ["Isa"] = "Sve2", ["Method"] = "AddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2DownConvertNarrowingUpper.template",new Dictionary {["TestName"] = "Sve2DownConvertNarrowingUpper_float_double", ["Isa"] = "Sve2", ["Method"] = "DownConvertNarrowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("Sve2DownConvertRoundingOdd.template",new Dictionary {["TestName"] = "Sve2DownConvertRoundingOdd_float_double", ["Isa"] = "Sve2", ["Method"] = "DownConvertRoundingOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("Sve2DownConvertRoundingOddUpper.template",new Dictionary {["TestName"] = "Sve2DownConvertRoundingOddUpper_float_double", ["Isa"] = "Sve2", ["Method"] = "DownConvertRoundingOddUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("Sve2Log2.template", new Dictionary { ["TestName"] = "Sve2Log2_int_float", ["Isa"] = "Sve2", ["Method"] = "Log2", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("Sve2Log2.template", new Dictionary { ["TestName"] = "Sve2Log2_long_double", ["Isa"] = "Sve2", ["Method"] = "Log2", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddRotateComplex.template",new Dictionary {["TestName"] = "Sve2MultiplyAddRotateComplex_sbyte", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddRotateComplex.template",new Dictionary {["TestName"] = "Sve2MultiplyAddRotateComplex_short", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddRotateComplex.template",new Dictionary {["TestName"] = "Sve2MultiplyAddRotateComplex_int", ["Isa"] 
= "Sve2", ["Method"] = "MultiplyAddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddRotateComplex.template",new Dictionary {["TestName"] = "Sve2MultiplyAddRotateComplex_long", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddRotateComplex.template",new Dictionary {["TestName"] = "Sve2MultiplyAddRotateComplex_byte", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddRotateComplex.template",new Dictionary {["TestName"] = "Sve2MultiplyAddRotateComplex_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddRotateComplex.template",new Dictionary {["TestName"] = "Sve2MultiplyAddRotateComplex_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddRotateComplex.template",new Dictionary {["TestName"] = "Sve2MultiplyAddRotateComplex_ulong", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddRotateComplexBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyAddRotateComplexBySelectedScalar_short", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddRotateComplexBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "Byte",["Op5BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddRotateComplexBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyAddRotateComplexBySelectedScalar_int", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddRotateComplexBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = 
"Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "Byte",["Op5BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddRotateComplexBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyAddRotateComplexBySelectedScalar_ushort", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddRotateComplexBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16",["Op4BaseType"] = "Byte",["Op5BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2MultiplyAddRotateComplexBySelectedScalar.template",new Dictionary {["TestName"] = "Sve2MultiplyAddRotateComplexBySelectedScalar_uint", ["Isa"] = "Sve2", ["Method"] = "MultiplyAddRotateComplexBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32",["Op4BaseType"] = "Byte",["Op5BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2ReciprocalEstimate.template", new Dictionary { ["TestName"] = "Sve2ReciprocalEstimate_uint", ["Isa"] = "Sve2", ["Method"] = "ReciprocalEstimate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2ReciprocalSqrtEstimate.template",new Dictionary {["TestName"] = "Sve2ReciprocalSqrtEstimate_uint", ["Isa"] = "Sve2", ["Method"] = "ReciprocalSqrtEstimate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingComplexAddRotate.template",new Dictionary {["TestName"] = "Sve2SaturatingComplexAddRotate_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingComplexAddRotate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingComplexAddRotate.template",new Dictionary {["TestName"] = "Sve2SaturatingComplexAddRotate_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingComplexAddRotate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingComplexAddRotate.template",new Dictionary {["TestName"] = "Sve2SaturatingComplexAddRotate_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingComplexAddRotate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingComplexAddRotate.template",new Dictionary {["TestName"] = "Sve2SaturatingComplexAddRotate_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingComplexAddRotate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingComplexMultiplyAddHighRotate.template",new Dictionary {["TestName"] = 
"Sve2SaturatingRoundingDoublingComplexMultiplyAddHighRotate_sbyte", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingComplexMultiplyAddHighRotate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingComplexMultiplyAddHighRotate.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingComplexMultiplyAddHighRotate_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingComplexMultiplyAddHighRotate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingComplexMultiplyAddHighRotate.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingComplexMultiplyAddHighRotate_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingComplexMultiplyAddHighRotate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingComplexMultiplyAddHighRotate.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingComplexMultiplyAddHighRotate_long", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingComplexMultiplyAddHighRotate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingComplexMultiplyAddHighRotate.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingComplexMultiplyAddHighRotate_short", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingComplexMultiplyAddHighRotate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16",["Op4BaseType"] = "UInt64",["Op5BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2SaturatingRoundingDoublingComplexMultiplyAddHighRotate.template",new Dictionary {["TestName"] = "Sve2SaturatingRoundingDoublingComplexMultiplyAddHighRotate_int", ["Isa"] = "Sve2", ["Method"] = "SaturatingRoundingDoublingComplexMultiplyAddHighRotate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32",["Op4BaseType"] = "UInt64",["Op5BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("Sve2UpConvertWideningUpper.template",new Dictionary {["TestName"] = "Sve2UpConvertWideningUpper_double_float", ["Isa"] = "Sve2", ["Method"] = "UpConvertWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + + +// Sve2 
counting + ("Sve2CountMatchingElements.template",new Dictionary { ["TestName"] = "Sve2CountMatchingElements_uint_int", ["Isa"] = "Sve2", ["Method"] = "CountMatchingElements", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2CountMatchingElements.template",new Dictionary { ["TestName"] = "Sve2CountMatchingElements_ulong_long", ["Isa"] = "Sve2", ["Method"] = "CountMatchingElements", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2CountMatchingElements.template",new Dictionary { ["TestName"] = "Sve2CountMatchingElements_uint", ["Isa"] = "Sve2", ["Method"] = "CountMatchingElements", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2CountMatchingElements.template",new Dictionary { ["TestName"] = "Sve2CountMatchingElements_ulong", ["Isa"] = "Sve2", ["Method"] = "CountMatchingElements", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2CountMatchingElementsIn128BitSegments.template",new Dictionary {["TestName"] = "Sve2CountMatchingElementsIn128BitSegments_byte_sbyte", ["Isa"] = "Sve2", ["Method"] = "CountMatchingElementsIn128BitSegments", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2CountMatchingElementsIn128BitSegments.template",new Dictionary {["TestName"] = "Sve2CountMatchingElementsIn128BitSegments_byte", ["Isa"] = "Sve2", ["Method"] = "CountMatchingElementsIn128BitSegments", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + + +// Sve2 bitwise + ("Sve2BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sve2BitwiseClearXor_sbyte", ["Isa"] = "Sve2", ["Method"] = "BitwiseClearXor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sve2BitwiseClearXor_short", ["Isa"] = "Sve2", ["Method"] = "BitwiseClearXor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sve2BitwiseClearXor_int", ["Isa"] = "Sve2", ["Method"] = "BitwiseClearXor", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sve2BitwiseClearXor_long", ["Isa"] = "Sve2", ["Method"] = "BitwiseClearXor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sve2BitwiseClearXor_byte", ["Isa"] = "Sve2", ["Method"] = "BitwiseClearXor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sve2BitwiseClearXor_ushort", ["Isa"] = "Sve2", ["Method"] = "BitwiseClearXor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sve2BitwiseClearXor_uint", ["Isa"] = "Sve2", ["Method"] = "BitwiseClearXor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sve2BitwiseClearXor_ulong", ["Isa"] = "Sve2", ["Method"] = "BitwiseClearXor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelect.template", new Dictionary { ["TestName"] = "Sve2BitwiseSelect_sbyte", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelect.template", new Dictionary { ["TestName"] = "Sve2BitwiseSelect_short", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelect.template", new Dictionary { ["TestName"] = "Sve2BitwiseSelect_int", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelect.template", new Dictionary { ["TestName"] = "Sve2BitwiseSelect_long", 
["Isa"] = "Sve2", ["Method"] = "BitwiseSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelect.template", new Dictionary { ["TestName"] = "Sve2BitwiseSelect_byte", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelect.template", new Dictionary { ["TestName"] = "Sve2BitwiseSelect_ushort", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelect.template", new Dictionary { ["TestName"] = "Sve2BitwiseSelect_uint", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelect.template", new Dictionary { ["TestName"] = "Sve2BitwiseSelect_ulong", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectLeftInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectLeftInverted_sbyte", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectLeftInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectLeftInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectLeftInverted_short", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectLeftInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectLeftInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectLeftInverted_int", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectLeftInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectLeftInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectLeftInverted_long", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectLeftInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = 
"Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectLeftInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectLeftInverted_byte", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectLeftInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectLeftInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectLeftInverted_ushort", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectLeftInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectLeftInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectLeftInverted_uint", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectLeftInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectLeftInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectLeftInverted_ulong", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectLeftInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectRightInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectRightInverted_sbyte", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectRightInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectRightInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectRightInverted_short", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectRightInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectRightInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectRightInverted_int", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectRightInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectRightInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectRightInverted_long", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectRightInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", 
["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectRightInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectRightInverted_byte", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectRightInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectRightInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectRightInverted_ushort", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectRightInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectRightInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectRightInverted_uint", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectRightInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2BitwiseSelectRightInverted.template",new Dictionary {["TestName"] = "Sve2BitwiseSelectRightInverted_ulong", ["Isa"] = "Sve2", ["Method"] = "BitwiseSelectRightInverted", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftArithmeticRounded.template",new Dictionary {["TestName"] = "Sve2ShiftArithmeticRounded_sbyte", ["Isa"] = "Sve2", ["Method"] = "ShiftArithmeticRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftArithmeticRounded.template",new Dictionary {["TestName"] = "Sve2ShiftArithmeticRounded_short", ["Isa"] = "Sve2", ["Method"] = "ShiftArithmeticRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftArithmeticRounded.template",new Dictionary {["TestName"] = "Sve2ShiftArithmeticRounded_int", ["Isa"] = "Sve2", ["Method"] = "ShiftArithmeticRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftArithmeticRounded.template",new Dictionary {["TestName"] = "Sve2ShiftArithmeticRounded_long", ["Isa"] = "Sve2", ["Method"] = "ShiftArithmeticRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftArithmeticRoundedSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftArithmeticRoundedSaturate_sbyte", ["Isa"] = "Sve2", ["Method"] = "ShiftArithmeticRoundedSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = 
"SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftArithmeticRoundedSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftArithmeticRoundedSaturate_short", ["Isa"] = "Sve2", ["Method"] = "ShiftArithmeticRoundedSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftArithmeticRoundedSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftArithmeticRoundedSaturate_int", ["Isa"] = "Sve2", ["Method"] = "ShiftArithmeticRoundedSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftArithmeticRoundedSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftArithmeticRoundedSaturate_long", ["Isa"] = "Sve2", ["Method"] = "ShiftArithmeticRoundedSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftArithmeticSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftArithmeticSaturate_sbyte", ["Isa"] = "Sve2", ["Method"] = "ShiftArithmeticSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftArithmeticSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftArithmeticSaturate_short", ["Isa"] = "Sve2", ["Method"] = "ShiftArithmeticSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftArithmeticSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftArithmeticSaturate_int", ["Isa"] = "Sve2", ["Method"] = "ShiftArithmeticSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftArithmeticSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftArithmeticSaturate_long", ["Isa"] = "Sve2", ["Method"] = "ShiftArithmeticSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftLeftAndInsert_sbyte", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftLeftAndInsert_short", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + 
("Sve2ShiftLeftAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftLeftAndInsert_int", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftLeftAndInsert_long", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftLeftAndInsert_byte", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftLeftAndInsert_ushort", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftLeftAndInsert_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftLeftAndInsert_ulong", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalSaturate_byte_sbyte", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalSaturate_ushort_short", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalSaturate_uint_int", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalSaturate.template",new Dictionary {["TestName"] = 
"Sve2ShiftLeftLogicalSaturate_ulong_long", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalSaturateUnsigned.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalSaturateUnsigned_byte_sbyte", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalSaturateUnsigned", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalSaturateUnsigned.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalSaturateUnsigned_ushort_short", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalSaturateUnsigned", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalSaturateUnsigned.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalSaturateUnsigned_uint_int", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalSaturateUnsigned", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalSaturateUnsigned.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalSaturateUnsigned_ulong_long", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalSaturateUnsigned", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalWideningEven.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalWideningEven_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalWideningEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalWideningEven.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalWideningEven_int_short", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalWideningEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalWideningEven.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalWideningEven_long_int", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalWideningEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalWideningEven.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalWideningEven_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalWideningEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalWideningEven.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalWideningEven_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalWideningEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", 
["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalWideningEven.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalWideningEven_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalWideningEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalWideningOdd.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalWideningOdd_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalWideningOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalWideningOdd.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalWideningOdd_int_short", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalWideningOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalWideningOdd.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalWideningOdd_long_int", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalWideningOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalWideningOdd.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalWideningOdd_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalWideningOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalWideningOdd.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalWideningOdd_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalWideningOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLeftLogicalWideningOdd.template",new Dictionary {["TestName"] = "Sve2ShiftLeftLogicalWideningOdd_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftLeftLogicalWideningOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLogicalRounded.template", new Dictionary { ["TestName"] = "Sve2ShiftLogicalRounded_byte_sbyte", ["Isa"] = "Sve2", ["Method"] = "ShiftLogicalRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLogicalRounded.template", new Dictionary { ["TestName"] = "Sve2ShiftLogicalRounded_ushort_short", ["Isa"] = "Sve2", ["Method"] = "ShiftLogicalRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLogicalRounded.template", new Dictionary { ["TestName"] = "Sve2ShiftLogicalRounded_uint_int", ["Isa"] = "Sve2", ["Method"] = "ShiftLogicalRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", 
["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLogicalRounded.template", new Dictionary { ["TestName"] = "Sve2ShiftLogicalRounded_ulong_long", ["Isa"] = "Sve2", ["Method"] = "ShiftLogicalRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLogicalRoundedSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftLogicalRoundedSaturate_byte_sbyte", ["Isa"] = "Sve2", ["Method"] = "ShiftLogicalRoundedSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLogicalRoundedSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftLogicalRoundedSaturate_ushort_short", ["Isa"] = "Sve2", ["Method"] = "ShiftLogicalRoundedSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLogicalRoundedSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftLogicalRoundedSaturate_uint_int", ["Isa"] = "Sve2", ["Method"] = "ShiftLogicalRoundedSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftLogicalRoundedSaturate.template",new Dictionary {["TestName"] = "Sve2ShiftLogicalRoundedSaturate_ulong_long", ["Isa"] = "Sve2", ["Method"] = "ShiftLogicalRoundedSaturate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftRightAndInsert_sbyte", ["Isa"] = "Sve2", ["Method"] = "ShiftRightAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftRightAndInsert_short", ["Isa"] = "Sve2", ["Method"] = "ShiftRightAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftRightAndInsert_int", ["Isa"] = "Sve2", ["Method"] = "ShiftRightAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftRightAndInsert_long", ["Isa"] = "Sve2", ["Method"] = "ShiftRightAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", 
["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftRightAndInsert_byte", ["Isa"] = "Sve2", ["Method"] = "ShiftRightAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftRightAndInsert_ushort", ["Isa"] = "Sve2", ["Method"] = "ShiftRightAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftRightAndInsert_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftRightAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightAndInsert.template", new Dictionary { ["TestName"] = "Sve2ShiftRightAndInsert_ulong", ["Isa"] = "Sve2", ["Method"] = "ShiftRightAndInsert", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticAdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticAdd_sbyte", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticAdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticAdd_short", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticAdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticAdd_int", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticAdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticAdd_long", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateEven_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", 
["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateEven_short_int", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateEven_int_long", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateEven_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateEven_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateEven_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateOdd_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateOdd_short_int", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateOdd_int_long", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateOdd_byte_ushort", ["Isa"] = "Sve2", 
["Method"] = "ShiftRightArithmeticNarrowingSaturateOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateOdd_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateOdd_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateUnsignedEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateUnsignedEven_byte_short", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateUnsignedEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateUnsignedEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateUnsignedEven_ushort_int", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateUnsignedEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateUnsignedEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateUnsignedEven_uint_long", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateUnsignedEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateUnsignedOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateUnsignedOdd_byte_short", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateUnsignedOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateUnsignedOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticNarrowingSaturateUnsignedOdd_ushort_int", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateUnsignedOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticNarrowingSaturateUnsignedOdd.template",new Dictionary {["TestName"] = 
"Sve2ShiftRightArithmeticNarrowingSaturateUnsignedOdd_uint_long", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticNarrowingSaturateUnsignedOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRounded.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRounded_sbyte", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRounded.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRounded_short", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRounded.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRounded_int", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRounded.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRounded_long", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedAdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedAdd_sbyte", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedAdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedAdd_short", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedAdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedAdd_int", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedAdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedAdd_long", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedNarrowingSaturateEven.template",new Dictionary {["TestName"] = 
"Sve2ShiftRightArithmeticRoundedNarrowingSaturateEven_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedNarrowingSaturateEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedNarrowingSaturateEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedNarrowingSaturateEven_short_int", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedNarrowingSaturateEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedNarrowingSaturateEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedNarrowingSaturateEven_int_long", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedNarrowingSaturateEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedNarrowingSaturateOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedNarrowingSaturateOdd_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedNarrowingSaturateOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedNarrowingSaturateOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedNarrowingSaturateOdd_short_int", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedNarrowingSaturateOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedNarrowingSaturateOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedNarrowingSaturateOdd_int_long", ["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedNarrowingSaturateOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven_byte_short",["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven_ushort_int",["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven.template",new 
Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven_uint_long",["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd_byte_short",["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd_ushort_int",["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd_uint_long",["Isa"] = "Sve2", ["Method"] = "ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalAdd.template", new Dictionary { ["TestName"] = "Sve2ShiftRightLogicalAdd_byte", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalAdd.template", new Dictionary { ["TestName"] = "Sve2ShiftRightLogicalAdd_ushort", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalAdd.template", new Dictionary { ["TestName"] = "Sve2ShiftRightLogicalAdd_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalAdd.template", new Dictionary { ["TestName"] = "Sve2ShiftRightLogicalAdd_ulong", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalNarrowingEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalNarrowingEven_sbyte_short", 
["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalNarrowingEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalNarrowingEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalNarrowingEven_short_int", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalNarrowingEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalNarrowingEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalNarrowingEven_int_long", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalNarrowingEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalNarrowingEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalNarrowingEven_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalNarrowingEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalNarrowingEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalNarrowingEven_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalNarrowingEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalNarrowingEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalNarrowingEven_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalNarrowingEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalNarrowingOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalNarrowingOdd_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalNarrowingOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalNarrowingOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalNarrowingOdd_short_int", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalNarrowingOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalNarrowingOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalNarrowingOdd_int_long", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalNarrowingOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalNarrowingOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalNarrowingOdd_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalNarrowingOdd", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalNarrowingOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalNarrowingOdd_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalNarrowingOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalNarrowingOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalNarrowingOdd_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalNarrowingOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRounded.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRounded_byte", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRounded.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRounded_ushort", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRounded.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRounded_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRounded.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRounded_ulong", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRounded", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedAdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedAdd_byte", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedAdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedAdd_ushort", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedAdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedAdd_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = 
"Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedAdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedAdd_ulong", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingEven_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingEven_short_int", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingEven_int_long", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingEven_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingEven_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingEven_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingOdd_sbyte_short", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingOdd_short_int", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingOdd_int_long", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingOdd_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingOdd_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingOdd_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingSaturateEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingSaturateEven_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingSaturateEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingSaturateEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingSaturateEven_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingSaturateEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingSaturateEven.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingSaturateEven_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingSaturateEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingSaturateOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingSaturateOdd_byte_ushort", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingSaturateOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = 
"Vector", ["Op2BaseType"] = "UInt16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingSaturateOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingSaturateOdd_ushort_uint", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingSaturateOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2ShiftRightLogicalRoundedNarrowingSaturateOdd.template",new Dictionary {["TestName"] = "Sve2ShiftRightLogicalRoundedNarrowingSaturateOdd_uint_ulong", ["Isa"] = "Sve2", ["Method"] = "ShiftRightLogicalRoundedNarrowingSaturateOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2Xor.template", new Dictionary { ["TestName"] = "Sve2Xor_sbyte", ["Isa"] = "Sve2", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2Xor.template", new Dictionary { ["TestName"] = "Sve2Xor_short", ["Isa"] = "Sve2", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2Xor.template", new Dictionary { ["TestName"] = "Sve2Xor_int", ["Isa"] = "Sve2", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2Xor.template", new Dictionary { ["TestName"] = "Sve2Xor_long", ["Isa"] = "Sve2", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2Xor.template", new Dictionary { ["TestName"] = "Sve2Xor_byte", ["Isa"] = "Sve2", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2Xor.template", new Dictionary { ["TestName"] = "Sve2Xor_ushort", ["Isa"] = "Sve2", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2Xor.template", new Dictionary { ["TestName"] = "Sve2Xor_uint", ["Isa"] = "Sve2", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = 
"Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2Xor.template", new Dictionary { ["TestName"] = "Sve2Xor_ulong", ["Isa"] = "Sve2", ["Method"] = "Xor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2XorRotateRight.template", new Dictionary { ["TestName"] = "Sve2XorRotateRight_sbyte", ["Isa"] = "Sve2", ["Method"] = "XorRotateRight", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2XorRotateRight.template", new Dictionary { ["TestName"] = "Sve2XorRotateRight_short", ["Isa"] = "Sve2", ["Method"] = "XorRotateRight", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2XorRotateRight.template", new Dictionary { ["TestName"] = "Sve2XorRotateRight_int", ["Isa"] = "Sve2", ["Method"] = "XorRotateRight", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2XorRotateRight.template", new Dictionary { ["TestName"] = "Sve2XorRotateRight_long", ["Isa"] = "Sve2", ["Method"] = "XorRotateRight", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2XorRotateRight.template", new Dictionary { ["TestName"] = "Sve2XorRotateRight_byte", ["Isa"] = "Sve2", ["Method"] = "XorRotateRight", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2XorRotateRight.template", new Dictionary { ["TestName"] = "Sve2XorRotateRight_ushort", ["Isa"] = "Sve2", ["Method"] = "XorRotateRight", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2XorRotateRight.template", new Dictionary { ["TestName"] = "Sve2XorRotateRight_uint", ["Isa"] = "Sve2", ["Method"] = "XorRotateRight", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2XorRotateRight.template", new Dictionary { ["TestName"] = "Sve2XorRotateRight_ulong", ["Isa"] = "Sve2", ["Method"] = "XorRotateRight", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + + +// Sve2 bitmanipulate + ("Sve2InterleavingXorLowerUpper.template",new Dictionary {["TestName"] = 
"Sve2InterleavingXorLowerUpper_sbyte", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorLowerUpper.template",new Dictionary {["TestName"] = "Sve2InterleavingXorLowerUpper_short", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorLowerUpper.template",new Dictionary {["TestName"] = "Sve2InterleavingXorLowerUpper_int", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorLowerUpper.template",new Dictionary {["TestName"] = "Sve2InterleavingXorLowerUpper_long", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorLowerUpper.template",new Dictionary {["TestName"] = "Sve2InterleavingXorLowerUpper_byte", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorLowerUpper.template",new Dictionary {["TestName"] = "Sve2InterleavingXorLowerUpper_ushort", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorLowerUpper.template",new Dictionary {["TestName"] = "Sve2InterleavingXorLowerUpper_uint", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorLowerUpper.template",new Dictionary {["TestName"] = "Sve2InterleavingXorLowerUpper_ulong", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorLowerUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorUpperLower.template",new Dictionary {["TestName"] = "Sve2InterleavingXorUpperLower_sbyte", ["Isa"] = "Sve2", ["Method"] = 
"InterleavingXorUpperLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorUpperLower.template",new Dictionary {["TestName"] = "Sve2InterleavingXorUpperLower_short", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorUpperLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorUpperLower.template",new Dictionary {["TestName"] = "Sve2InterleavingXorUpperLower_int", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorUpperLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorUpperLower.template",new Dictionary {["TestName"] = "Sve2InterleavingXorUpperLower_long", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorUpperLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorUpperLower.template",new Dictionary {["TestName"] = "Sve2InterleavingXorUpperLower_byte", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorUpperLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorUpperLower.template",new Dictionary {["TestName"] = "Sve2InterleavingXorUpperLower_ushort", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorUpperLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorUpperLower.template",new Dictionary {["TestName"] = "Sve2InterleavingXorUpperLower_uint", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorUpperLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2InterleavingXorUpperLower.template",new Dictionary {["TestName"] = "Sve2InterleavingXorUpperLower_ulong", ["Isa"] = "Sve2", ["Method"] = "InterleavingXorUpperLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2MoveWideningLower.template", new Dictionary { ["TestName"] = "Sve2MoveWideningLower_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "MoveWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2MoveWideningLower.template", new Dictionary { ["TestName"] = "Sve2MoveWideningLower_int_short", ["Isa"] = "Sve2", ["Method"] = "MoveWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2MoveWideningLower.template", new Dictionary { ["TestName"] = "Sve2MoveWideningLower_long_int", ["Isa"] = "Sve2", ["Method"] = "MoveWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2MoveWideningLower.template", new Dictionary { ["TestName"] = "Sve2MoveWideningLower_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "MoveWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MoveWideningLower.template", new Dictionary { ["TestName"] = "Sve2MoveWideningLower_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "MoveWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2MoveWideningLower.template", new Dictionary { ["TestName"] = "Sve2MoveWideningLower_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "MoveWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2MoveWideningUpper.template", new Dictionary { ["TestName"] = "Sve2MoveWideningUpper_short_sbyte", ["Isa"] = "Sve2", ["Method"] = "MoveWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("Sve2MoveWideningUpper.template", new Dictionary { ["TestName"] = "Sve2MoveWideningUpper_int_short", ["Isa"] = "Sve2", ["Method"] = "MoveWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("Sve2MoveWideningUpper.template", new Dictionary { ["TestName"] = "Sve2MoveWideningUpper_long_int", ["Isa"] = "Sve2", ["Method"] = "MoveWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("Sve2MoveWideningUpper.template", new Dictionary { ["TestName"] = "Sve2MoveWideningUpper_ushort_byte", ["Isa"] = "Sve2", ["Method"] = "MoveWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2MoveWideningUpper.template", new Dictionary { ["TestName"] = "Sve2MoveWideningUpper_uint_ushort", ["Isa"] = "Sve2", ["Method"] = "MoveWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2MoveWideningUpper.template", new Dictionary { ["TestName"] = "Sve2MoveWideningUpper_ulong_uint", ["Isa"] = "Sve2", ["Method"] = "MoveWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookup.template", new Dictionary { ["TestName"] = "Sve2VectorTableLookup_float_uint", ["Isa"] = "Sve2", 
["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector_Vector",["Op1BaseType"] = "Single_Single",["Op2VectorType"] = "Vector",["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookup.template", new Dictionary { ["TestName"] = "Sve2VectorTableLookup_double_ulong", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector_Vector",["Op1BaseType"] = "Double_Double",["Op2VectorType"] = "Vector",["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookup.template", new Dictionary { ["TestName"] = "Sve2VectorTableLookup_sbyte_byte", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector_Vector",["Op1BaseType"] = "SByte_SByte",["Op2VectorType"] = "Vector",["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookup.template", new Dictionary { ["TestName"] = "Sve2VectorTableLookup_short_ushort", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector_Vector",["Op1BaseType"] = "Int16_Int16",["Op2VectorType"] = "Vector",["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookup.template", new Dictionary { ["TestName"] = "Sve2VectorTableLookup_int_uint", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector_Vector",["Op1BaseType"] = "Int32_Int32",["Op2VectorType"] = "Vector",["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookup.template", new Dictionary { ["TestName"] = "Sve2VectorTableLookup_long_ulong", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector_Vector",["Op1BaseType"] = "Int64_Int64",["Op2VectorType"] = "Vector",["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookup.template", new Dictionary { ["TestName"] = "Sve2VectorTableLookup_byte", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector_Vector",["Op1BaseType"] = "Byte_Byte",["Op2VectorType"] = "Vector",["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookup.template", new Dictionary { ["TestName"] = "Sve2VectorTableLookup_ushort", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector_Vector",["Op1BaseType"] = "UInt16_UInt16",["Op2VectorType"] = "Vector",["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookup.template", new Dictionary { ["TestName"] = "Sve2VectorTableLookup_uint", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector_Vector",["Op1BaseType"] = "UInt32_UInt32",["Op2VectorType"] = "Vector",["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookup.template", new Dictionary { ["TestName"] = "Sve2VectorTableLookup_ulong", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector_Vector",["Op1BaseType"] = "UInt64_UInt64",["Op2VectorType"] = "Vector",["Op2BaseType"] = "UInt64", 
["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookupExtension.template",new Dictionary {["TestName"] = "Sve2VectorTableLookupExtension_float_uint", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookupExtension", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookupExtension.template",new Dictionary {["TestName"] = "Sve2VectorTableLookupExtension_double_ulong", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookupExtension", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookupExtension.template",new Dictionary {["TestName"] = "Sve2VectorTableLookupExtension_sbyte_byte", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookupExtension", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookupExtension.template",new Dictionary {["TestName"] = "Sve2VectorTableLookupExtension_short_ushort", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookupExtension", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookupExtension.template",new Dictionary {["TestName"] = "Sve2VectorTableLookupExtension_int_uint", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookupExtension", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookupExtension.template",new Dictionary {["TestName"] = "Sve2VectorTableLookupExtension_long_ulong", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookupExtension", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookupExtension.template",new Dictionary {["TestName"] = "Sve2VectorTableLookupExtension_byte", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookupExtension", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookupExtension.template",new Dictionary {["TestName"] = "Sve2VectorTableLookupExtension_ushort", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookupExtension", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", 
["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookupExtension.template",new Dictionary {["TestName"] = "Sve2VectorTableLookupExtension_uint", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookupExtension", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("Sve2VectorTableLookupExtension.template",new Dictionary {["TestName"] = "Sve2VectorTableLookupExtension_ulong", ["Isa"] = "Sve2", ["Method"] = "VectorTableLookupExtension", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + + +// SveBf16 + ("SveBf16Bfloat16DotProduct.template",new Dictionary { ["TestName"] = "SveBf16Bfloat16DotProduct_float_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "Bfloat16DotProduct", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16Bfloat16MatrixMultiplyAccumulate.template",new Dictionary {["TestName"] = "SveBf16Bfloat16MatrixMultiplyAccumulate_float_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "Bfloat16MatrixMultiplyAccumulate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16Bfloat16MultiplyAddWideningToSinglePrecisionLower.template",new Dictionary {["TestName"] = "SveBf16Bfloat16MultiplyAddWideningToSinglePrecisionLower_float_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "Bfloat16MultiplyAddWideningToSinglePrecisionLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16Bfloat16MultiplyAddWideningToSinglePrecisionLower.template",new Dictionary {["TestName"] = "SveBf16Bfloat16MultiplyAddWideningToSinglePrecisionLower_float_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "Bfloat16MultiplyAddWideningToSinglePrecisionLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("SveBf16Bfloat16MultiplyAddWideningToSinglePrecisionUpper.template",new Dictionary {["TestName"] = "SveBf16Bfloat16MultiplyAddWideningToSinglePrecisionUpper_float_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "Bfloat16MultiplyAddWideningToSinglePrecisionUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16Bfloat16MultiplyAddWideningToSinglePrecisionUpper.template",new Dictionary 
{["TestName"] = "SveBf16Bfloat16MultiplyAddWideningToSinglePrecisionUpper_float_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "Bfloat16MultiplyAddWideningToSinglePrecisionUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("SveBf16ConcatenateEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveBf16ConcatenateEvenInt128FromTwoInputs_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ConcatenateEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16ConcatenateOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveBf16ConcatenateOddInt128FromTwoInputs_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ConcatenateOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16ConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveBf16ConditionalExtractAfterLastActiveElement_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16ConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveBf16ConditionalExtractAfterLastActiveElement_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16ConditionalExtractAfterLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveBf16ConditionalExtractAfterLastActiveElementAndReplicate_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ConditionalExtractAfterLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16ConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveBf16ConditionalExtractLastActiveElement_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ConditionalExtractLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16ConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveBf16ConditionalExtractLastActiveElement_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ConditionalExtractLastActiveElement", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", 
["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16ConditionalExtractLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveBf16ConditionalExtractLastActiveElementAndReplicate_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ConditionalExtractLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16ConditionalSelect.template", new Dictionary { ["TestName"] = "SveBf16ConditionalSelect_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ConditionalSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16ConvertToBFloat16.template", new Dictionary { ["TestName"] = "SveBf16ConvertToBFloat16_bfloat16_float", ["Isa"] = "SveBf16", ["Method"] = "ConvertToBFloat16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveBf16CreateFalseMaskBFloat16.template",new Dictionary {["TestName"] = "SveBf16CreateFalseMaskBFloat16_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "CreateFalseMaskBFloat16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16CreateTrueMaskBFloat16.template",new Dictionary {["TestName"] = "SveBf16CreateTrueMaskBFloat16_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "CreateTrueMaskBFloat16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveBf16CreateWhileReadAfterWriteMask.template",new Dictionary {["TestName"] = "SveBf16CreateWhileReadAfterWriteMask_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "CreateWhileReadAfterWriteMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16CreateWhileWriteAfterReadMask.template",new Dictionary {["TestName"] = "SveBf16CreateWhileWriteAfterReadMask_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "CreateWhileWriteAfterReadMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16DotProductBySelectedScalar.template",new Dictionary {["TestName"] = "SveBf16DotProductBySelectedScalar_float_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "DotProductBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("SveBf16DownConvertNarrowingUpper.template",new Dictionary {["TestName"] = "SveBf16DownConvertNarrowingUpper_bfloat16_float", ["Isa"] = "SveBf16", ["Method"] = "DownConvertNarrowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + 
("SveBf16DuplicateSelectedScalarToVector.template",new Dictionary {["TestName"] = "SveBf16DuplicateSelectedScalarToVector_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "DuplicateSelectedScalarToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveBf16ExtractAfterLastScalar.template",new Dictionary {["TestName"] = "SveBf16ExtractAfterLastScalar_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ExtractAfterLastScalar", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16ExtractAfterLastVector.template",new Dictionary {["TestName"] = "SveBf16ExtractAfterLastVector_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ExtractAfterLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16ExtractLastScalar.template", new Dictionary { ["TestName"] = "SveBf16ExtractLastScalar_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ExtractLastScalar", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16ExtractLastVector.template", new Dictionary { ["TestName"] = "SveBf16ExtractLastVector_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ExtractLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16ExtractVector.template", new Dictionary { ["TestName"] = "SveBf16ExtractVector_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ExtractVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveBf16GetActiveElementCount.template",new Dictionary {["TestName"] = "SveBf16GetActiveElementCount_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "GetActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16InsertIntoShiftedVector.template",new Dictionary {["TestName"] = "SveBf16InsertIntoShiftedVector_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "InsertIntoShiftedVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16InterleaveEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveBf16InterleaveEvenInt128FromTwoInputs_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "InterleaveEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16InterleaveInt128FromHighHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveBf16InterleaveInt128FromHighHalvesOfTwoInputs_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "InterleaveInt128FromHighHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16", 
["LargestVectorSize"] = "8",}), + ("SveBf16InterleaveInt128FromLowHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveBf16InterleaveInt128FromLowHalvesOfTwoInputs_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "InterleaveInt128FromLowHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16InterleaveOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveBf16InterleaveOddInt128FromTwoInputs_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "InterleaveOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16LoadVector.template", new Dictionary { ["TestName"] = "SveBf16LoadVector_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "LoadVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16LoadVector128AndReplicateToVector.template",new Dictionary {["TestName"] = "SveBf16LoadVector128AndReplicateToVector_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "LoadVector128AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16LoadVector256AndReplicateToVector.template",new Dictionary {["TestName"] = "SveBf16LoadVector256AndReplicateToVector_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "LoadVector256AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16LoadVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveBf16LoadVectorFirstFaulting_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "LoadVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16LoadVectorNonFaulting.template",new Dictionary {["TestName"] = "SveBf16LoadVectorNonFaulting_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "LoadVectorNonFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16", ["Op1BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16LoadVectorNonTemporal.template",new Dictionary {["TestName"] = "SveBf16LoadVectorNonTemporal_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "LoadVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16LoadVectorx2.template", new Dictionary { ["TestName"] = "SveBf16LoadVectorx2_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "LoadVectorx2", ["RetVectorType"] = "Vector_Vector",["RetBaseType"] = "BFloat16_BFloat16",["Op1VectorType"] = "Vector",["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16LoadVectorx3.template", new Dictionary { ["TestName"] = "SveBf16LoadVectorx3_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "LoadVectorx3", ["RetVectorType"] = 
"Vector_Vector_Vector",["RetBaseType"] = "BFloat16_BFloat16_BFloat16",["Op1VectorType"] = "Vector",["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16LoadVectorx4.template", new Dictionary { ["TestName"] = "SveBf16LoadVectorx4_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "LoadVectorx4", ["RetVectorType"] = "Vector_Vector_Vector_Vector",["RetBaseType"] = "BFloat16_BFloat16_BFloat16_BFloat16",["Op1VectorType"] = "Vector",["Op1BaseType"] = "BFloat16",["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16PopCount.template", new Dictionary { ["TestName"] = "SveBf16PopCount_ushort_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "PopCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16ReverseElement.template", new Dictionary { ["TestName"] = "SveBf16ReverseElement_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ReverseElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16Splice.template", new Dictionary { ["TestName"] = "SveBf16Splice_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "Splice", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16Store.template", new Dictionary { ["TestName"] = "SveBf16Store_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16Store.template", new Dictionary { ["TestName"] = "SveBf16Store_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector_Vector",["Op3BaseType"] = "BFloat16_BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16Store.template", new Dictionary { ["TestName"] = "SveBf16Store_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector_Vector_Vector",["Op3BaseType"] = "BFloat16_BFloat16_BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16Store.template", new Dictionary { ["TestName"] = "SveBf16Store_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector_Vector_Vector_Vector",["Op3BaseType"] = "BFloat16_BFloat16_BFloat16_BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16StoreNonTemporal.template", new Dictionary { ["TestName"] = "SveBf16StoreNonTemporal_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "StoreNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "BFloat16",["LargestVectorSize"] = "8",}), + ("SveBf16TransposeEven.template", new Dictionary { ["TestName"] = "SveBf16TransposeEven_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "TransposeEven", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16TransposeOdd.template", new Dictionary { ["TestName"] = "SveBf16TransposeOdd_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "TransposeOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16UnzipEven.template", new Dictionary { ["TestName"] = "SveBf16UnzipEven_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "UnzipEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16UnzipOdd.template", new Dictionary { ["TestName"] = "SveBf16UnzipOdd_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "UnzipOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16VectorTableLookup.template", new Dictionary { ["TestName"] = "SveBf16VectorTableLookup_bfloat16_ushort", ["Isa"] = "SveBf16", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveBf16VectorTableLookup.template", new Dictionary { ["TestName"] = "SveBf16VectorTableLookup_bfloat16_ushort", ["Isa"] = "SveBf16", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector_Vector",["Op1BaseType"] = "BFloat16_BFloat16",["Op2VectorType"] = "Vector",["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveBf16VectorTableLookupExtension.template",new Dictionary {["TestName"] = "SveBf16VectorTableLookupExtension_bfloat16_ushort", ["Isa"] = "SveBf16", ["Method"] = "VectorTableLookupExtension", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16",["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveBf16ZipHigh.template", new Dictionary { ["TestName"] = "SveBf16ZipHigh_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ZipHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + ("SveBf16ZipLow.template", new Dictionary { ["TestName"] = "SveBf16ZipLow_bfloat16", ["Isa"] = "SveBf16", ["Method"] = "ZipLow", ["RetVectorType"] = "Vector", ["RetBaseType"] = "BFloat16",["Op1VectorType"] = "Vector", ["Op1BaseType"] = "BFloat16",["Op2VectorType"] = "Vector", ["Op2BaseType"] = "BFloat16", ["LargestVectorSize"] = "8",}), + + +// SveF32mm + ("SveF32mmMatrixMultiplyAccumulate.template",new Dictionary {["TestName"] = "SveF32mmMatrixMultiplyAccumulate_float", ["Isa"] = "SveF32mm", ["Method"] = "MatrixMultiplyAccumulate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["Op3VectorType"] = 
"Vector", ["Op3BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + + +// SveF64mm + ("SveF64mmConcatenateEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateEvenInt128FromTwoInputs_float", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateEvenInt128FromTwoInputs_double", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateEvenInt128FromTwoInputs_sbyte", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateEvenInt128FromTwoInputs_short", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateEvenInt128FromTwoInputs_int", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateEvenInt128FromTwoInputs_long", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateEvenInt128FromTwoInputs_byte", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateEvenInt128FromTwoInputs_ushort", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = 
"SveF64mmConcatenateEvenInt128FromTwoInputs_uint", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateEvenInt128FromTwoInputs_ulong", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateOddInt128FromTwoInputs_float", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateOddInt128FromTwoInputs_double", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateOddInt128FromTwoInputs_sbyte", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateOddInt128FromTwoInputs_short", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateOddInt128FromTwoInputs_int", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateOddInt128FromTwoInputs_long", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateOddInt128FromTwoInputs_byte", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", 
["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateOddInt128FromTwoInputs_ushort", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateOddInt128FromTwoInputs_uint", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveF64mmConcatenateOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmConcatenateOddInt128FromTwoInputs_ulong", ["Isa"] = "SveF64mm", ["Method"] = "ConcatenateOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveEvenInt128FromTwoInputs_float", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveEvenInt128FromTwoInputs_double", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveEvenInt128FromTwoInputs_sbyte", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveEvenInt128FromTwoInputs_short", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveEvenInt128FromTwoInputs_int", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveEvenInt128FromTwoInputs_long", ["Isa"] = 
"SveF64mm", ["Method"] = "InterleaveEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveEvenInt128FromTwoInputs_byte", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveEvenInt128FromTwoInputs_ushort", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveEvenInt128FromTwoInputs_uint", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveEvenInt128FromTwoInputs_ulong", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs_float", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromHighHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs_double", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromHighHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs_sbyte", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromHighHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs_short", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromHighHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = 
"Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs_int", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromHighHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs_long", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromHighHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs_byte", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromHighHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs_ushort", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromHighHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs_uint", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromHighHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromHighHalvesOfTwoInputs_ulong", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromHighHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs_float", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromLowHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs_double", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromLowHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = 
"Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs_sbyte", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromLowHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs_short", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromLowHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs_int", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromLowHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs_long", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromLowHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs_byte", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromLowHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs_ushort", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromLowHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs_uint", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromLowHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveInt128FromLowHalvesOfTwoInputs_ulong", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveInt128FromLowHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + 
("SveF64mmInterleaveOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveOddInt128FromTwoInputs_float", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveOddInt128FromTwoInputs_double", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveOddInt128FromTwoInputs_sbyte", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveOddInt128FromTwoInputs_short", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveOddInt128FromTwoInputs_int", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveOddInt128FromTwoInputs_long", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveOddInt128FromTwoInputs_byte", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveOddInt128FromTwoInputs_ushort", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveOddInt128FromTwoInputs_uint", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", 
["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveF64mmInterleaveOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveF64mmInterleaveOddInt128FromTwoInputs_ulong", ["Isa"] = "SveF64mm", ["Method"] = "InterleaveOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveF64mmLoadVector256AndReplicateToVector.template",new Dictionary {["TestName"] = "SveF64mmLoadVector256AndReplicateToVector_float", ["Isa"] = "SveF64mm", ["Method"] = "LoadVector256AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveF64mmLoadVector256AndReplicateToVector.template",new Dictionary {["TestName"] = "SveF64mmLoadVector256AndReplicateToVector_double", ["Isa"] = "SveF64mm", ["Method"] = "LoadVector256AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveF64mmLoadVector256AndReplicateToVector.template",new Dictionary {["TestName"] = "SveF64mmLoadVector256AndReplicateToVector_sbyte", ["Isa"] = "SveF64mm", ["Method"] = "LoadVector256AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "SByte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "SByte", ["Op2BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveF64mmLoadVector256AndReplicateToVector.template",new Dictionary {["TestName"] = "SveF64mmLoadVector256AndReplicateToVector_short", ["Isa"] = "SveF64mm", ["Method"] = "LoadVector256AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveF64mmLoadVector256AndReplicateToVector.template",new Dictionary {["TestName"] = "SveF64mmLoadVector256AndReplicateToVector_int", ["Isa"] = "SveF64mm", ["Method"] = "LoadVector256AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveF64mmLoadVector256AndReplicateToVector.template",new Dictionary {["TestName"] = "SveF64mmLoadVector256AndReplicateToVector_long", ["Isa"] = "SveF64mm", ["Method"] = "LoadVector256AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveF64mmLoadVector256AndReplicateToVector.template",new Dictionary {["TestName"] = "SveF64mmLoadVector256AndReplicateToVector_byte", ["Isa"] = "SveF64mm", ["Method"] = "LoadVector256AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveF64mmLoadVector256AndReplicateToVector.template",new Dictionary {["TestName"] = "SveF64mmLoadVector256AndReplicateToVector_ushort", ["Isa"] = "SveF64mm", ["Method"] = "LoadVector256AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", 
["Op1BaseType"] = "UInt16", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveF64mmLoadVector256AndReplicateToVector.template",new Dictionary {["TestName"] = "SveF64mmLoadVector256AndReplicateToVector_uint", ["Isa"] = "SveF64mm", ["Method"] = "LoadVector256AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveF64mmLoadVector256AndReplicateToVector.template",new Dictionary {["TestName"] = "SveF64mmLoadVector256AndReplicateToVector_ulong", ["Isa"] = "SveF64mm", ["Method"] = "LoadVector256AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveF64mmMatrixMultiplyAccumulate.template",new Dictionary {["TestName"] = "SveF64mmMatrixMultiplyAccumulate_double", ["Isa"] = "SveF64mm", ["Method"] = "MatrixMultiplyAccumulate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Double", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + + +// SveFp16 + ("SveFp16Abs.template", new Dictionary { ["TestName"] = "SveFp16Abs_half", ["Isa"] = "SveFp16", ["Method"] = "Abs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16AbsoluteCompareGreaterThan.template",new Dictionary {["TestName"] = "SveFp16AbsoluteCompareGreaterThan_half", ["Isa"] = "SveFp16", ["Method"] = "AbsoluteCompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16AbsoluteCompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveFp16AbsoluteCompareGreaterThanOrEqual_half", ["Isa"] = "SveFp16", ["Method"] = "AbsoluteCompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16AbsoluteCompareLessThan.template",new Dictionary {["TestName"] = "SveFp16AbsoluteCompareLessThan_half", ["Isa"] = "SveFp16", ["Method"] = "AbsoluteCompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16AbsoluteCompareLessThanOrEqual.template",new Dictionary {["TestName"] = "SveFp16AbsoluteCompareLessThanOrEqual_half", ["Isa"] = "SveFp16", ["Method"] = "AbsoluteCompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16AbsoluteDifference.template",new Dictionary { ["TestName"] = "SveFp16AbsoluteDifference_half", ["Isa"] = "SveFp16", ["Method"] = "AbsoluteDifference", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16Add.template", new Dictionary { 
["TestName"] = "SveFp16Add_half", ["Isa"] = "SveFp16", ["Method"] = "Add", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16AddAcross.template", new Dictionary { ["TestName"] = "SveFp16AddAcross_half", ["Isa"] = "SveFp16", ["Method"] = "AddAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16AddPairwise.template", new Dictionary { ["TestName"] = "SveFp16AddPairwise_half", ["Isa"] = "SveFp16", ["Method"] = "AddPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16AddRotateComplex.template", new Dictionary { ["TestName"] = "SveFp16AddRotateComplex_half", ["Isa"] = "SveFp16", ["Method"] = "AddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveFp16AddSequentialAcross.template",new Dictionary {["TestName"] = "SveFp16AddSequentialAcross_half", ["Isa"] = "SveFp16", ["Method"] = "AddSequentialAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16CompareEqual.template", new Dictionary { ["TestName"] = "SveFp16CompareEqual_half", ["Isa"] = "SveFp16", ["Method"] = "CompareEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16CompareGreaterThan.template",new Dictionary { ["TestName"] = "SveFp16CompareGreaterThan_half", ["Isa"] = "SveFp16", ["Method"] = "CompareGreaterThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16CompareGreaterThanOrEqual.template",new Dictionary {["TestName"] = "SveFp16CompareGreaterThanOrEqual_half", ["Isa"] = "SveFp16", ["Method"] = "CompareGreaterThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16CompareLessThan.template", new Dictionary { ["TestName"] = "SveFp16CompareLessThan_half", ["Isa"] = "SveFp16", ["Method"] = "CompareLessThan", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16CompareLessThanOrEqual.template",new Dictionary {["TestName"] = "SveFp16CompareLessThanOrEqual_half", ["Isa"] = "SveFp16", ["Method"] = "CompareLessThanOrEqual", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16CompareNotEqualTo.template", new Dictionary { ["TestName"] = 
"SveFp16CompareNotEqualTo_half", ["Isa"] = "SveFp16", ["Method"] = "CompareNotEqualTo", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16CompareUnordered.template", new Dictionary { ["TestName"] = "SveFp16CompareUnordered_half", ["Isa"] = "SveFp16", ["Method"] = "CompareUnordered", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConcatenateEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveFp16ConcatenateEvenInt128FromTwoInputs_half", ["Isa"] = "SveFp16", ["Method"] = "ConcatenateEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConcatenateOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveFp16ConcatenateOddInt128FromTwoInputs_half", ["Isa"] = "SveFp16", ["Method"] = "ConcatenateOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveFp16ConditionalExtractAfterLastActiveElement_half", ["Isa"] = "SveFp16", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConditionalExtractAfterLastActiveElement.template",new Dictionary {["TestName"] = "SveFp16ConditionalExtractAfterLastActiveElement_half", ["Isa"] = "SveFp16", ["Method"] = "ConditionalExtractAfterLastActiveElement", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConditionalExtractAfterLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveFp16ConditionalExtractAfterLastActiveElementAndReplicate_half", ["Isa"] = "SveFp16", ["Method"] = "ConditionalExtractAfterLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveFp16ConditionalExtractLastActiveElement_half", ["Isa"] = "SveFp16", ["Method"] = "ConditionalExtractLastActiveElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConditionalExtractLastActiveElement.template",new Dictionary {["TestName"] = "SveFp16ConditionalExtractLastActiveElement_half", ["Isa"] = "SveFp16", ["Method"] = 
"ConditionalExtractLastActiveElement", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConditionalExtractLastActiveElementAndReplicate.template",new Dictionary {["TestName"] = "SveFp16ConditionalExtractLastActiveElementAndReplicate_half", ["Isa"] = "SveFp16", ["Method"] = "ConditionalExtractLastActiveElementAndReplicate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConditionalSelect.template", new Dictionary { ["TestName"] = "SveFp16ConditionalSelect_half", ["Isa"] = "SveFp16", ["Method"] = "ConditionalSelect", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToDouble.template", new Dictionary { ["TestName"] = "SveFp16ConvertToDouble_double_half", ["Isa"] = "SveFp16", ["Method"] = "ConvertToDouble", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Double", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToHalf.template", new Dictionary { ["TestName"] = "SveFp16ConvertToHalf_half_float", ["Isa"] = "SveFp16", ["Method"] = "ConvertToHalf", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToHalf.template", new Dictionary { ["TestName"] = "SveFp16ConvertToHalf_half_double", ["Isa"] = "SveFp16", ["Method"] = "ConvertToHalf", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Double", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToHalf.template", new Dictionary { ["TestName"] = "SveFp16ConvertToHalf_half_short", ["Isa"] = "SveFp16", ["Method"] = "ConvertToHalf", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToHalf.template", new Dictionary { ["TestName"] = "SveFp16ConvertToHalf_half_int", ["Isa"] = "SveFp16", ["Method"] = "ConvertToHalf", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToHalf.template", new Dictionary { ["TestName"] = "SveFp16ConvertToHalf_half_long", ["Isa"] = "SveFp16", ["Method"] = "ConvertToHalf", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToHalf.template", new Dictionary { ["TestName"] = "SveFp16ConvertToHalf_half_ushort", ["Isa"] = "SveFp16", ["Method"] = "ConvertToHalf", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToHalf.template", new Dictionary { ["TestName"] = "SveFp16ConvertToHalf_half_uint", ["Isa"] = "SveFp16", ["Method"] = "ConvertToHalf", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["LargestVectorSize"] = 
"8",}), + ("SveFp16ConvertToHalf.template", new Dictionary { ["TestName"] = "SveFp16ConvertToHalf_half_ulong", ["Isa"] = "SveFp16", ["Method"] = "ConvertToHalf", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToInt16.template", new Dictionary { ["TestName"] = "SveFp16ConvertToInt16_short_half", ["Isa"] = "SveFp16", ["Method"] = "ConvertToInt16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToInt32.template", new Dictionary { ["TestName"] = "SveFp16ConvertToInt32_int_half", ["Isa"] = "SveFp16", ["Method"] = "ConvertToInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToInt64.template", new Dictionary { ["TestName"] = "SveFp16ConvertToInt64_long_half", ["Isa"] = "SveFp16", ["Method"] = "ConvertToInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToSingle.template", new Dictionary { ["TestName"] = "SveFp16ConvertToSingle_float_half", ["Isa"] = "SveFp16", ["Method"] = "ConvertToSingle", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToUInt16.template", new Dictionary { ["TestName"] = "SveFp16ConvertToUInt16_ushort_half", ["Isa"] = "SveFp16", ["Method"] = "ConvertToUInt16", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToUInt32.template", new Dictionary { ["TestName"] = "SveFp16ConvertToUInt32_uint_half", ["Isa"] = "SveFp16", ["Method"] = "ConvertToUInt32", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ConvertToUInt64.template", new Dictionary { ["TestName"] = "SveFp16ConvertToUInt64_ulong_half", ["Isa"] = "SveFp16", ["Method"] = "ConvertToUInt64", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16CreateFalseMaskHalf.template",new Dictionary {["TestName"] = "SveFp16CreateFalseMaskHalf_half", ["Isa"] = "SveFp16", ["Method"] = "CreateFalseMaskHalf", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16CreateTrueMaskHalf.template",new Dictionary { ["TestName"] = "SveFp16CreateTrueMaskHalf_half", ["Isa"] = "SveFp16", ["Method"] = "CreateTrueMaskHalf", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1BaseType"] = "SveMaskPattern", ["LargestVectorSize"] = "8",}), + ("SveFp16CreateWhileReadAfterWriteMask.template",new Dictionary {["TestName"] = "SveFp16CreateWhileReadAfterWriteMask_half", ["Isa"] = "SveFp16", ["Method"] = "CreateWhileReadAfterWriteMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16CreateWhileWriteAfterReadMask.template",new Dictionary {["TestName"] = "SveFp16CreateWhileWriteAfterReadMask_half", ["Isa"] = "SveFp16", ["Method"] = "CreateWhileWriteAfterReadMask", ["RetVectorType"] = "Vector", ["RetBaseType"] = 
"Half", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16Divide.template", new Dictionary { ["TestName"] = "SveFp16Divide_half", ["Isa"] = "SveFp16", ["Method"] = "Divide", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16DownConvertNarrowingUpper.template",new Dictionary {["TestName"] = "SveFp16DownConvertNarrowingUpper_half_float", ["Isa"] = "SveFp16", ["Method"] = "DownConvertNarrowingUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["LargestVectorSize"] = "8",}), + ("SveFp16DuplicateSelectedScalarToVector.template",new Dictionary {["TestName"] = "SveFp16DuplicateSelectedScalarToVector_half", ["Isa"] = "SveFp16", ["Method"] = "DuplicateSelectedScalarToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveFp16ExtractAfterLastScalar.template",new Dictionary {["TestName"] = "SveFp16ExtractAfterLastScalar_half", ["Isa"] = "SveFp16", ["Method"] = "ExtractAfterLastScalar", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ExtractAfterLastVector.template",new Dictionary {["TestName"] = "SveFp16ExtractAfterLastVector_half", ["Isa"] = "SveFp16", ["Method"] = "ExtractAfterLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ExtractLastScalar.template", new Dictionary { ["TestName"] = "SveFp16ExtractLastScalar_half", ["Isa"] = "SveFp16", ["Method"] = "ExtractLastScalar", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ExtractLastVector.template", new Dictionary { ["TestName"] = "SveFp16ExtractLastVector_half", ["Isa"] = "SveFp16", ["Method"] = "ExtractLastVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ExtractVector.template", new Dictionary { ["TestName"] = "SveFp16ExtractVector_half", ["Isa"] = "SveFp16", ["Method"] = "ExtractVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveFp16FloatingPointExponentialAccelerator.template",new Dictionary {["TestName"] = "SveFp16FloatingPointExponentialAccelerator_half_ushort", ["Isa"] = "SveFp16", ["Method"] = "FloatingPointExponentialAccelerator", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveFp16FusedMultiplyAdd.template", new Dictionary { ["TestName"] = "SveFp16FusedMultiplyAdd_half", ["Isa"] = "SveFp16", ["Method"] = "FusedMultiplyAdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16FusedMultiplyAddBySelectedScalar.template",new Dictionary {["TestName"] = 
"SveFp16FusedMultiplyAddBySelectedScalar_half", ["Isa"] = "SveFp16", ["Method"] = "FusedMultiplyAddBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("SveFp16FusedMultiplyAddNegated.template",new Dictionary {["TestName"] = "SveFp16FusedMultiplyAddNegated_half", ["Isa"] = "SveFp16", ["Method"] = "FusedMultiplyAddNegated", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16FusedMultiplySubtract.template",new Dictionary {["TestName"] = "SveFp16FusedMultiplySubtract_half", ["Isa"] = "SveFp16", ["Method"] = "FusedMultiplySubtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16FusedMultiplySubtractBySelectedScalar.template",new Dictionary {["TestName"] = "SveFp16FusedMultiplySubtractBySelectedScalar_half", ["Isa"] = "SveFp16", ["Method"] = "FusedMultiplySubtractBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("SveFp16FusedMultiplySubtractNegated.template",new Dictionary {["TestName"] = "SveFp16FusedMultiplySubtractNegated_half", ["Isa"] = "SveFp16", ["Method"] = "FusedMultiplySubtractNegated", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16GetActiveElementCount.template",new Dictionary {["TestName"] = "SveFp16GetActiveElementCount_half", ["Isa"] = "SveFp16", ["Method"] = "GetActiveElementCount", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16InsertIntoShiftedVector.template",new Dictionary {["TestName"] = "SveFp16InsertIntoShiftedVector_half", ["Isa"] = "SveFp16", ["Method"] = "InsertIntoShiftedVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16InterleaveEvenInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveFp16InterleaveEvenInt128FromTwoInputs_half", ["Isa"] = "SveFp16", ["Method"] = "InterleaveEvenInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16InterleaveInt128FromHighHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveFp16InterleaveInt128FromHighHalvesOfTwoInputs_half", ["Isa"] = "SveFp16", ["Method"] = "InterleaveInt128FromHighHalvesOfTwoInputs", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16InterleaveInt128FromLowHalvesOfTwoInputs.template",new Dictionary {["TestName"] = "SveFp16InterleaveInt128FromLowHalvesOfTwoInputs_half", ["Isa"] = "SveFp16", ["Method"] = "InterleaveInt128FromLowHalvesOfTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16InterleaveOddInt128FromTwoInputs.template",new Dictionary {["TestName"] = "SveFp16InterleaveOddInt128FromTwoInputs_half", ["Isa"] = "SveFp16", ["Method"] = "InterleaveOddInt128FromTwoInputs", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16LoadVector.template", new Dictionary { ["TestName"] = "SveFp16LoadVector_half", ["Isa"] = "SveFp16", ["Method"] = "LoadVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16LoadVector128AndReplicateToVector.template",new Dictionary {["TestName"] = "SveFp16LoadVector128AndReplicateToVector_half", ["Isa"] = "SveFp16", ["Method"] = "LoadVector128AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16LoadVector256AndReplicateToVector.template",new Dictionary {["TestName"] = "SveFp16LoadVector256AndReplicateToVector_half", ["Isa"] = "SveFp16", ["Method"] = "LoadVector256AndReplicateToVector", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16LoadVectorFirstFaulting.template",new Dictionary {["TestName"] = "SveFp16LoadVectorFirstFaulting_half", ["Isa"] = "SveFp16", ["Method"] = "LoadVectorFirstFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16LoadVectorNonFaulting.template",new Dictionary {["TestName"] = "SveFp16LoadVectorNonFaulting_half", ["Isa"] = "SveFp16", ["Method"] = "LoadVectorNonFaulting", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16LoadVectorNonTemporal.template",new Dictionary {["TestName"] = "SveFp16LoadVectorNonTemporal_half", ["Isa"] = "SveFp16", ["Method"] = "LoadVectorNonTemporal", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16LoadVectorx2.template", new Dictionary { ["TestName"] = "SveFp16LoadVectorx2_half", ["Isa"] = "SveFp16", ["Method"] = "LoadVectorx2", ["RetVectorType"] = "Vector_Vector",["RetBaseType"] = "Half_Half",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16LoadVectorx3.template", new Dictionary { ["TestName"] = "SveFp16LoadVectorx3_half", ["Isa"] = "SveFp16", ["Method"] = "LoadVectorx3", ["RetVectorType"] = 
"Vector_Vector_Vector",["RetBaseType"] = "Half_Half_Half",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16LoadVectorx4.template", new Dictionary { ["TestName"] = "SveFp16LoadVectorx4_half", ["Isa"] = "SveFp16", ["Method"] = "LoadVectorx4", ["RetVectorType"] = "Vector_Vector_Vector_Vector",["RetBaseType"] = "Half_Half_Half_Half",["Op1VectorType"] = "Vector",["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16Log2.template", new Dictionary { ["TestName"] = "SveFp16Log2_short_half", ["Isa"] = "SveFp16", ["Method"] = "Log2", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16Max.template", new Dictionary { ["TestName"] = "SveFp16Max_half", ["Isa"] = "SveFp16", ["Method"] = "Max", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MaxAcross.template", new Dictionary { ["TestName"] = "SveFp16MaxAcross_half", ["Isa"] = "SveFp16", ["Method"] = "MaxAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MaxNumber.template", new Dictionary { ["TestName"] = "SveFp16MaxNumber_half", ["Isa"] = "SveFp16", ["Method"] = "MaxNumber", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MaxNumberAcross.template", new Dictionary { ["TestName"] = "SveFp16MaxNumberAcross_half", ["Isa"] = "SveFp16", ["Method"] = "MaxNumberAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MaxNumberPairwise.template", new Dictionary { ["TestName"] = "SveFp16MaxNumberPairwise_half", ["Isa"] = "SveFp16", ["Method"] = "MaxNumberPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MaxPairwise.template", new Dictionary { ["TestName"] = "SveFp16MaxPairwise_half", ["Isa"] = "SveFp16", ["Method"] = "MaxPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16Min.template", new Dictionary { ["TestName"] = "SveFp16Min_half", ["Isa"] = "SveFp16", ["Method"] = "Min", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MinAcross.template", new Dictionary { ["TestName"] = "SveFp16MinAcross_half", ["Isa"] = "SveFp16", ["Method"] = "MinAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MinNumber.template", new Dictionary { ["TestName"] = "SveFp16MinNumber_half", ["Isa"] = "SveFp16", ["Method"] = "MinNumber", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", 
["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MinNumberAcross.template", new Dictionary { ["TestName"] = "SveFp16MinNumberAcross_half", ["Isa"] = "SveFp16", ["Method"] = "MinNumberAcross", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MinNumberPairwise.template", new Dictionary { ["TestName"] = "SveFp16MinNumberPairwise_half", ["Isa"] = "SveFp16", ["Method"] = "MinNumberPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MinPairwise.template", new Dictionary { ["TestName"] = "SveFp16MinPairwise_half", ["Isa"] = "SveFp16", ["Method"] = "MinPairwise", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16Multiply.template", new Dictionary { ["TestName"] = "SveFp16Multiply_half", ["Isa"] = "SveFp16", ["Method"] = "Multiply", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MultiplyAddRotateComplex.template",new Dictionary {["TestName"] = "SveFp16MultiplyAddRotateComplex_half", ["Isa"] = "SveFp16", ["Method"] = "MultiplyAddRotateComplex", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half",["Op4BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("SveFp16MultiplyAddRotateComplexBySelectedScalar.template",new Dictionary {["TestName"] = "SveFp16MultiplyAddRotateComplexBySelectedScalar_half", ["Isa"] = "SveFp16", ["Method"] = "MultiplyAddRotateComplexBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half",["Op4BaseType"] = "Byte",["Op5BaseType"] = "Byte",["LargestVectorSize"] = "8",}), + ("SveFp16MultiplyAddWideningLower.template",new Dictionary {["TestName"] = "SveFp16MultiplyAddWideningLower_float_half", ["Isa"] = "SveFp16", ["Method"] = "MultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MultiplyAddWideningLower.template",new Dictionary {["TestName"] = "SveFp16MultiplyAddWideningLower_float_half", ["Isa"] = "SveFp16", ["Method"] = "MultiplyAddWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("SveFp16MultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "SveFp16MultiplyAddWideningUpper_float_half", ["Isa"] = "SveFp16", ["Method"] = "MultiplyAddWideningUpper", 
["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MultiplyAddWideningUpper.template",new Dictionary {["TestName"] = "SveFp16MultiplyAddWideningUpper_float_half", ["Isa"] = "SveFp16", ["Method"] = "MultiplyAddWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("SveFp16MultiplyBySelectedScalar.template",new Dictionary {["TestName"] = "SveFp16MultiplyBySelectedScalar_half", ["Isa"] = "SveFp16", ["Method"] = "MultiplyBySelectedScalar", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveFp16MultiplyExtended.template", new Dictionary { ["TestName"] = "SveFp16MultiplyExtended_half", ["Isa"] = "SveFp16", ["Method"] = "MultiplyExtended", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "SveFp16MultiplySubtractWideningLower_float_half", ["Isa"] = "SveFp16", ["Method"] = "MultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MultiplySubtractWideningLower.template",new Dictionary {["TestName"] = "SveFp16MultiplySubtractWideningLower_float_half", ["Isa"] = "SveFp16", ["Method"] = "MultiplySubtractWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("SveFp16MultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "SveFp16MultiplySubtractWideningUpper_float_half", ["Isa"] = "SveFp16", ["Method"] = "MultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16MultiplySubtractWideningUpper.template",new Dictionary {["TestName"] = "SveFp16MultiplySubtractWideningUpper_float_half", ["Isa"] = "SveFp16", ["Method"] = "MultiplySubtractWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Single", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("SveFp16Negate.template", new Dictionary { ["TestName"] = "SveFp16Negate_half", ["Isa"] = "SveFp16", ["Method"] = "Negate", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16PopCount.template", new Dictionary { ["TestName"] = "SveFp16PopCount_ushort_half", ["Isa"] = "SveFp16", ["Method"] = "PopCount", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ReciprocalEstimate.template",new Dictionary { ["TestName"] = "SveFp16ReciprocalEstimate_half", ["Isa"] = "SveFp16", ["Method"] = "ReciprocalEstimate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ReciprocalExponent.template",new Dictionary { ["TestName"] = "SveFp16ReciprocalExponent_half", ["Isa"] = "SveFp16", ["Method"] = "ReciprocalExponent", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ReciprocalSqrtEstimate.template",new Dictionary {["TestName"] = "SveFp16ReciprocalSqrtEstimate_half", ["Isa"] = "SveFp16", ["Method"] = "ReciprocalSqrtEstimate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ReciprocalSqrtStep.template",new Dictionary { ["TestName"] = "SveFp16ReciprocalSqrtStep_half", ["Isa"] = "SveFp16", ["Method"] = "ReciprocalSqrtStep", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ReciprocalStep.template", new Dictionary { ["TestName"] = "SveFp16ReciprocalStep_half", ["Isa"] = "SveFp16", ["Method"] = "ReciprocalStep", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ReverseElement.template", new Dictionary { ["TestName"] = "SveFp16ReverseElement_half", ["Isa"] = "SveFp16", ["Method"] = "ReverseElement", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16RoundAwayFromZero.template", new Dictionary { ["TestName"] = "SveFp16RoundAwayFromZero_half", ["Isa"] = "SveFp16", ["Method"] = "RoundAwayFromZero", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16RoundToNearest.template", new Dictionary { ["TestName"] = "SveFp16RoundToNearest_half", ["Isa"] = "SveFp16", ["Method"] = "RoundToNearest", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16RoundToNegativeInfinity.template",new Dictionary {["TestName"] = "SveFp16RoundToNegativeInfinity_half", ["Isa"] = "SveFp16", ["Method"] = "RoundToNegativeInfinity", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16RoundToPositiveInfinity.template",new Dictionary {["TestName"] = "SveFp16RoundToPositiveInfinity_half", ["Isa"] = "SveFp16", ["Method"] = "RoundToPositiveInfinity", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", 
["LargestVectorSize"] = "8",}), + ("SveFp16RoundToZero.template", new Dictionary { ["TestName"] = "SveFp16RoundToZero_half", ["Isa"] = "SveFp16", ["Method"] = "RoundToZero", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16Scale.template", new Dictionary { ["TestName"] = "SveFp16Scale_half_short", ["Isa"] = "SveFp16", ["Method"] = "Scale", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int16", ["LargestVectorSize"] = "8",}), + ("SveFp16Splice.template", new Dictionary { ["TestName"] = "SveFp16Splice_half", ["Isa"] = "SveFp16", ["Method"] = "Splice", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16Sqrt.template", new Dictionary { ["TestName"] = "SveFp16Sqrt_half", ["Isa"] = "SveFp16", ["Method"] = "Sqrt", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16Store.template", new Dictionary { ["TestName"] = "SveFp16Store_half", ["Isa"] = "SveFp16", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16Store.template", new Dictionary { ["TestName"] = "SveFp16Store_half", ["Isa"] = "SveFp16", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector_Vector",["Op3BaseType"] = "Half_Half",["LargestVectorSize"] = "8",}), + ("SveFp16Store.template", new Dictionary { ["TestName"] = "SveFp16Store_half", ["Isa"] = "SveFp16", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector_Vector_Vector",["Op3BaseType"] = "Half_Half_Half",["LargestVectorSize"] = "8",}), + ("SveFp16Store.template", new Dictionary { ["TestName"] = "SveFp16Store_half", ["Isa"] = "SveFp16", ["Method"] = "Store", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector_Vector_Vector_Vector",["Op3BaseType"] = "Half_Half_Half_Half",["LargestVectorSize"] = "8",}), + ("SveFp16StoreNonTemporal.template", new Dictionary { ["TestName"] = "SveFp16StoreNonTemporal_half", ["Isa"] = "SveFp16", ["Method"] = "StoreNonTemporal", ["RetBaseType"] = "void", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16Subtract.template", new Dictionary { ["TestName"] = "SveFp16Subtract_half", ["Isa"] = "SveFp16", ["Method"] = "Subtract", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16TransposeEven.template", new Dictionary { ["TestName"] = "SveFp16TransposeEven_half", ["Isa"] = "SveFp16", ["Method"] = "TransposeEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = 
"Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16TransposeOdd.template", new Dictionary { ["TestName"] = "SveFp16TransposeOdd_half", ["Isa"] = "SveFp16", ["Method"] = "TransposeOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16TrigonometricMultiplyAddCoefficient.template",new Dictionary {["TestName"] = "SveFp16TrigonometricMultiplyAddCoefficient_half", ["Isa"] = "SveFp16", ["Method"] = "TrigonometricMultiplyAddCoefficient", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveFp16TrigonometricSelectCoefficient.template",new Dictionary {["TestName"] = "SveFp16TrigonometricSelectCoefficient_half_ushort", ["Isa"] = "SveFp16", ["Method"] = "TrigonometricSelectCoefficient", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveFp16TrigonometricStartingValue.template",new Dictionary {["TestName"] = "SveFp16TrigonometricStartingValue_half_ushort", ["Isa"] = "SveFp16", ["Method"] = "TrigonometricStartingValue", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveFp16UnzipEven.template", new Dictionary { ["TestName"] = "SveFp16UnzipEven_half", ["Isa"] = "SveFp16", ["Method"] = "UnzipEven", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16UnzipOdd.template", new Dictionary { ["TestName"] = "SveFp16UnzipOdd_half", ["Isa"] = "SveFp16", ["Method"] = "UnzipOdd", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16UpConvertWideningUpper.template",new Dictionary {["TestName"] = "SveFp16UpConvertWideningUpper_float_half", ["Isa"] = "SveFp16", ["Method"] = "UpConvertWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Single", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16VectorTableLookup.template", new Dictionary { ["TestName"] = "SveFp16VectorTableLookup_half_ushort", ["Isa"] = "SveFp16", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveFp16VectorTableLookup.template", new Dictionary { ["TestName"] = "SveFp16VectorTableLookup_half_ushort", ["Isa"] = "SveFp16", ["Method"] = "VectorTableLookup", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector_Vector",["Op1BaseType"] = "Half_Half",["Op2VectorType"] = "Vector",["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveFp16VectorTableLookupExtension.template",new Dictionary {["TestName"] = 
"SveFp16VectorTableLookupExtension_half_ushort", ["Isa"] = "SveFp16", ["Method"] = "VectorTableLookupExtension", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveFp16ZipHigh.template", new Dictionary { ["TestName"] = "SveFp16ZipHigh_half", ["Isa"] = "SveFp16", ["Method"] = "ZipHigh", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + ("SveFp16ZipLow.template", new Dictionary { ["TestName"] = "SveFp16ZipLow_half", ["Isa"] = "SveFp16", ["Method"] = "ZipLow", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Half", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Half", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Half", ["LargestVectorSize"] = "8",}), + + +// SveI8mm + ("SveI8mmDotProductSignedUnsigned.template",new Dictionary {["TestName"] = "SveI8mmDotProductSignedUnsigned_int_sbyte_byte", ["Isa"] = "SveI8mm", ["Method"] = "DotProductSignedUnsigned", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveI8mmDotProductSignedUnsigned.template",new Dictionary {["TestName"] = "SveI8mmDotProductSignedUnsigned_int_sbyte_byte", ["Isa"] = "SveI8mm", ["Method"] = "DotProductSignedUnsigned", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("SveI8mmDotProductUnsignedSigned.template",new Dictionary {["TestName"] = "SveI8mmDotProductUnsignedSigned_int_byte_sbyte", ["Isa"] = "SveI8mm", ["Method"] = "DotProductUnsignedSigned", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveI8mmDotProductUnsignedSigned.template",new Dictionary {["TestName"] = "SveI8mmDotProductUnsignedSigned_int_byte_sbyte", ["Isa"] = "SveI8mm", ["Method"] = "DotProductUnsignedSigned", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte",["Op4BaseType"] = "UInt64",["LargestVectorSize"] = "8",}), + ("SveI8mmMatrixMultiplyAccumulate.template",new Dictionary {["TestName"] = "SveI8mmMatrixMultiplyAccumulate_int_sbyte", ["Isa"] = "SveI8mm", ["Method"] = "MatrixMultiplyAccumulate", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "SByte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + ("SveI8mmMatrixMultiplyAccumulate.template",new Dictionary {["TestName"] = "SveI8mmMatrixMultiplyAccumulate_uint_byte", ["Isa"] = "SveI8mm", ["Method"] = "MatrixMultiplyAccumulate", ["RetVectorType"] = "Vector", 
["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveI8mmMatrixMultiplyAccumulateUnsignedSigned.template",new Dictionary {["TestName"] = "SveI8mmMatrixMultiplyAccumulateUnsignedSigned_int_byte_sbyte", ["Isa"] = "SveI8mm", ["Method"] = "MatrixMultiplyAccumulateUnsignedSigned", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["Op3VectorType"] = "Vector", ["Op3BaseType"] = "SByte", ["LargestVectorSize"] = "8",}), + + +// Sha3 + ("Sha3BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sha3BitwiseClearXor_byte", ["Isa"] = "Sha3", ["Method"] = "BitwiseClearXor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sha3BitwiseClearXor_ushort", ["Isa"] = "Sha3", ["Method"] = "BitwiseClearXor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sha3BitwiseClearXor_uint", ["Isa"] = "Sha3", ["Method"] = "BitwiseClearXor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sha3BitwiseClearXor_ulong", ["Isa"] = "Sha3", ["Method"] = "BitwiseClearXor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sha3BitwiseClearXor_sbyte", ["Isa"] = "Sha3", ["Method"] = "BitwiseClearXor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sha3BitwiseClearXor_short", ["Isa"] = "Sha3", ["Method"] = "BitwiseClearXor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sha3BitwiseClearXor_int", ["Isa"] = "Sha3", ["Method"] = "BitwiseClearXor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3BitwiseClearXor.template", new Dictionary { ["TestName"] = "Sha3BitwiseClearXor_long", ["Isa"] = "Sha3", ["Method"] = "BitwiseClearXor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3BitwiseRotateLeftBy1AndXor.template",new Dictionary {["TestName"] = "Sha3BitwiseRotateLeftBy1AndXor_ulong", ["Isa"] = "Sha3", ["Method"] = "BitwiseRotateLeftBy1AndXor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["LargestVectorSize"] = "8",}), + ("Sha3Xor.template", new Dictionary { ["TestName"] = "Sha3Xor_byte", ["Isa"] = "Sha3", ["Method"] = "Xor", 
["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3Xor.template", new Dictionary { ["TestName"] = "Sha3Xor_ushort", ["Isa"] = "Sha3", ["Method"] = "Xor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3Xor.template", new Dictionary { ["TestName"] = "Sha3Xor_uint", ["Isa"] = "Sha3", ["Method"] = "Xor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3Xor.template", new Dictionary { ["TestName"] = "Sha3Xor_ulong", ["Isa"] = "Sha3", ["Method"] = "Xor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3Xor.template", new Dictionary { ["TestName"] = "Sha3Xor_sbyte", ["Isa"] = "Sha3", ["Method"] = "Xor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3Xor.template", new Dictionary { ["TestName"] = "Sha3Xor_short", ["Isa"] = "Sha3", ["Method"] = "Xor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3Xor.template", new Dictionary { ["TestName"] = "Sha3Xor_int", ["Isa"] = "Sha3", ["Method"] = "Xor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3Xor.template", new Dictionary { ["TestName"] = "Sha3Xor_long", ["Isa"] = "Sha3", ["Method"] = "Xor", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Vector128",["LargestVectorSize"] = "8",}), + ("Sha3XorRotateRight.template", new Dictionary { ["TestName"] = "Sha3XorRotateRight_ulong", ["Isa"] = "Sha3", ["Method"] = "XorRotateRight", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["Op3BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + + +// Sm4 + ("Sm4Sm4EncryptionAndDecryption.template",new Dictionary {["TestName"] = "Sm4Sm4EncryptionAndDecryption_uint", ["Isa"] = "Sm4", ["Method"] = "Sm4EncryptionAndDecryption", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["LargestVectorSize"] = "8",}), + ("Sm4Sm4KeyUpdates.template", new Dictionary { ["TestName"] = "Sm4Sm4KeyUpdates_uint", ["Isa"] = "Sm4", ["Method"] = "Sm4KeyUpdates", ["RetBaseType"] = "Vector128", ["Op1BaseType"] = "Vector128", ["Op2BaseType"] = "Vector128", ["LargestVectorSize"] = "8",}), + + +// SveAes + ("SveAesAesInverseMixColumns.template",new Dictionary {["TestName"] = "SveAesAesInverseMixColumns_byte", ["Isa"] = "SveAes", ["Method"] = "AesInverseMixColumns", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveAesAesMixColumns.template", new Dictionary { ["TestName"] = "SveAesAesMixColumns_byte", ["Isa"] = "SveAes", ["Method"] = "AesMixColumns", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveAesAesSingleRoundDecryption.template",new Dictionary 
{["TestName"] = "SveAesAesSingleRoundDecryption_byte", ["Isa"] = "SveAes", ["Method"] = "AesSingleRoundDecryption", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveAesAesSingleRoundEncryption.template",new Dictionary {["TestName"] = "SveAesAesSingleRoundEncryption_byte", ["Isa"] = "SveAes", ["Method"] = "AesSingleRoundEncryption", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveAesPolynomialMultiplyWideningLower.template",new Dictionary {["TestName"] = "SveAesPolynomialMultiplyWideningLower_ulong", ["Isa"] = "SveAes", ["Method"] = "PolynomialMultiplyWideningLower", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveAesPolynomialMultiplyWideningUpper.template",new Dictionary {["TestName"] = "SveAesPolynomialMultiplyWideningUpper_ulong", ["Isa"] = "SveAes", ["Method"] = "PolynomialMultiplyWideningUpper", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + + +// SveBitperm + ("SveBitpermGatherLowerBitsFromPositionsSelectedByBitmask.template",new Dictionary {["TestName"] = "SveBitpermGatherLowerBitsFromPositionsSelectedByBitmask_byte", ["Isa"] = "SveBitperm", ["Method"] = "GatherLowerBitsFromPositionsSelectedByBitmask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveBitpermGatherLowerBitsFromPositionsSelectedByBitmask.template",new Dictionary {["TestName"] = "SveBitpermGatherLowerBitsFromPositionsSelectedByBitmask_ushort", ["Isa"] = "SveBitperm", ["Method"] = "GatherLowerBitsFromPositionsSelectedByBitmask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveBitpermGatherLowerBitsFromPositionsSelectedByBitmask.template",new Dictionary {["TestName"] = "SveBitpermGatherLowerBitsFromPositionsSelectedByBitmask_uint", ["Isa"] = "SveBitperm", ["Method"] = "GatherLowerBitsFromPositionsSelectedByBitmask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveBitpermGatherLowerBitsFromPositionsSelectedByBitmask.template",new Dictionary {["TestName"] = "SveBitpermGatherLowerBitsFromPositionsSelectedByBitmask_ulong", ["Isa"] = "SveBitperm", ["Method"] = "GatherLowerBitsFromPositionsSelectedByBitmask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveBitpermGroupBitsToRightOrLeftAsSelectedByBitmask.template",new Dictionary {["TestName"] = "SveBitpermGroupBitsToRightOrLeftAsSelectedByBitmask_byte", ["Isa"] = "SveBitperm", ["Method"] = 
"GroupBitsToRightOrLeftAsSelectedByBitmask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveBitpermGroupBitsToRightOrLeftAsSelectedByBitmask.template",new Dictionary {["TestName"] = "SveBitpermGroupBitsToRightOrLeftAsSelectedByBitmask_ushort", ["Isa"] = "SveBitperm", ["Method"] = "GroupBitsToRightOrLeftAsSelectedByBitmask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveBitpermGroupBitsToRightOrLeftAsSelectedByBitmask.template",new Dictionary {["TestName"] = "SveBitpermGroupBitsToRightOrLeftAsSelectedByBitmask_uint", ["Isa"] = "SveBitperm", ["Method"] = "GroupBitsToRightOrLeftAsSelectedByBitmask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveBitpermGroupBitsToRightOrLeftAsSelectedByBitmask.template",new Dictionary {["TestName"] = "SveBitpermGroupBitsToRightOrLeftAsSelectedByBitmask_ulong", ["Isa"] = "SveBitperm", ["Method"] = "GroupBitsToRightOrLeftAsSelectedByBitmask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + ("SveBitpermScatterLowerBitsIntoPositionsSelectedByBitmask.template",new Dictionary {["TestName"] = "SveBitpermScatterLowerBitsIntoPositionsSelectedByBitmask_byte", ["Isa"] = "SveBitperm", ["Method"] = "ScatterLowerBitsIntoPositionsSelectedByBitmask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Byte", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Byte", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Byte", ["LargestVectorSize"] = "8",}), + ("SveBitpermScatterLowerBitsIntoPositionsSelectedByBitmask.template",new Dictionary {["TestName"] = "SveBitpermScatterLowerBitsIntoPositionsSelectedByBitmask_ushort", ["Isa"] = "SveBitperm", ["Method"] = "ScatterLowerBitsIntoPositionsSelectedByBitmask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt16", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt16", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt16", ["LargestVectorSize"] = "8",}), + ("SveBitpermScatterLowerBitsIntoPositionsSelectedByBitmask.template",new Dictionary {["TestName"] = "SveBitpermScatterLowerBitsIntoPositionsSelectedByBitmask_uint", ["Isa"] = "SveBitperm", ["Method"] = "ScatterLowerBitsIntoPositionsSelectedByBitmask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveBitpermScatterLowerBitsIntoPositionsSelectedByBitmask.template",new Dictionary {["TestName"] = "SveBitpermScatterLowerBitsIntoPositionsSelectedByBitmask_ulong", ["Isa"] = "SveBitperm", ["Method"] = "ScatterLowerBitsIntoPositionsSelectedByBitmask", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + + +// SveSha3 + ("SveSha3BitwiseRotateLeftBy1AndXor.template",new Dictionary {["TestName"] = 
"SveSha3BitwiseRotateLeftBy1AndXor_long", ["Isa"] = "SveSha3", ["Method"] = "BitwiseRotateLeftBy1AndXor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "Int64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "Int64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "Int64", ["LargestVectorSize"] = "8",}), + ("SveSha3BitwiseRotateLeftBy1AndXor.template",new Dictionary {["TestName"] = "SveSha3BitwiseRotateLeftBy1AndXor_ulong", ["Isa"] = "SveSha3", ["Method"] = "BitwiseRotateLeftBy1AndXor", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt64", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt64", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt64", ["LargestVectorSize"] = "8",}), + + +// SveSm4 + ("SveSm4Sm4EncryptionAndDecryption.template",new Dictionary {["TestName"] = "SveSm4Sm4EncryptionAndDecryption_uint", ["Isa"] = "SveSm4", ["Method"] = "Sm4EncryptionAndDecryption", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + ("SveSm4Sm4KeyUpdates.template", new Dictionary { ["TestName"] = "SveSm4Sm4KeyUpdates_uint", ["Isa"] = "SveSm4", ["Method"] = "Sm4KeyUpdates", ["RetVectorType"] = "Vector", ["RetBaseType"] = "UInt32", ["Op1VectorType"] = "Vector", ["Op1BaseType"] = "UInt32", ["Op2VectorType"] = "Vector", ["Op2BaseType"] = "UInt32", ["LargestVectorSize"] = "8",}), + + diff --git a/sve_api/out_api/apiraw_FEAT_BF16__.cs b/sve_api/out_api/apiraw_FEAT_BF16__.cs new file mode 100644 index 0000000000000..df6b563e86137 --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_BF16__.cs @@ -0,0 +1,554 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class SveBf16 : AdvSimd /// Feature: FEAT_BF16 +{ + + public static unsafe Vector Bfloat16DotProduct(Vector addend, Vector left, Vector right); // BFDOT // MOVPRFX + + public static unsafe Vector Bfloat16MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3); // BFMMLA // MOVPRFX + + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionLower(Vector op1, Vector op2, Vector op3); // BFMLALB // MOVPRFX + + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionLower(Vector op1, Vector op2, Vector op3, ulong imm_index); // BFMLALB // MOVPRFX + + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionUpper(Vector op1, Vector op2, Vector op3); // BFMLALT // MOVPRFX + + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); // BFMLALT // MOVPRFX + + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); // UZP1 + + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); // UZP2 + + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data); // CLASTA // MOVPRFX + + public static unsafe bfloat16 ConditionalExtractAfterLastActiveElement(Vector mask, bfloat16 defaultValues, Vector data); // CLASTA // MOVPRFX + + public static unsafe Vector ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, Vector defaultScalar, Vector data); // CLASTA // MOVPRFX + + public static unsafe Vector ConditionalExtractLastActiveElement(Vector mask, Vector defaultValue, Vector data); // CLASTB // MOVPRFX + + public static unsafe bfloat16 ConditionalExtractLastActiveElement(Vector mask, bfloat16 defaultValues, 
Vector data); // CLASTB // MOVPRFX + + public static unsafe Vector ConditionalExtractLastActiveElementAndReplicate(Vector mask, Vector fallback, Vector data); // CLASTB // MOVPRFX + + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right); // SEL + + public static unsafe Vector ConvertToBFloat16(Vector value); // BFCVT // predicated, MOVPRFX + + public static unsafe Vector CreateFalseMaskBFloat16(); // PFALSE + + public static unsafe Vector CreateTrueMaskBFloat16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE + + public static unsafe Vector CreateWhileReadAfterWriteMask(bfloat16* left, bfloat16* right); // WHILERW + + public static unsafe Vector CreateWhileWriteAfterReadMask(bfloat16* left, bfloat16* right); // WHILEWR + + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // BFDOT // MOVPRFX + + public static unsafe Vector DownConvertNarrowingUpper(Vector value); // BFCVTNT // predicated + + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); // DUP or TBL + + public static unsafe bfloat16 ExtractAfterLastScalar(Vector value); // LASTA // predicated + + public static unsafe Vector ExtractAfterLastVector(Vector value); // LASTA // predicated + + public static unsafe bfloat16 ExtractLastScalar(Vector value); // LASTB // predicated + + public static unsafe Vector ExtractLastVector(Vector value); // LASTB // predicated + + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index); // EXT // MOVPRFX + + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); // CNTP + + public static unsafe Vector InsertIntoShiftedVector(Vector left, bfloat16 right); // INSR + + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); // TRN1 + + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); // ZIP2 + + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); // ZIP1 + + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right); // TRN2 + + public static unsafe Vector LoadVector(Vector mask, bfloat16* address); // LD1H + + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, bfloat16* address); // LD1RQH + + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, bfloat16* address); // LD1ROH + + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, bfloat16* address); // LDFF1H + + public static unsafe Vector LoadVectorNonFaulting(bfloat16* address); // LDNF1H // predicated + + public static unsafe Vector LoadVectorNonTemporal(Vector mask, bfloat16* address); // LDNT1H + + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, bfloat16* address); // LD2H + + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, bfloat16* address); // LD3H + + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, bfloat16* address); // LD4H + + public static unsafe Vector PopCount(Vector value); // CNT // predicated, MOVPRFX + + public static unsafe Vector ReverseElement(Vector value); // REV + + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); // SPLICE // MOVPRFX + + public static unsafe void Store(Vector mask, bfloat16* address, Vector data); // ST1H + + public static unsafe void 
Store(Vector mask, bfloat16* address, (Vector Value1, Vector Value2) data); // ST2H + + public static unsafe void Store(Vector mask, bfloat16* address, (Vector Value1, Vector Value2, Vector Value3) data); // ST3H + + public static unsafe void Store(Vector mask, bfloat16* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); // ST4H + + public static unsafe void StoreNonTemporal(Vector mask, bfloat16* address, Vector data); // STNT1H + + public static unsafe Vector TransposeEven(Vector left, Vector right); // TRN1 + + public static unsafe Vector TransposeOdd(Vector left, Vector right); // TRN2 + + public static unsafe Vector UnzipEven(Vector left, Vector right); // UZP1 + + public static unsafe Vector UnzipOdd(Vector left, Vector right); // UZP2 + + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); // TBL + + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); // TBL + + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); // TBX + + public static unsafe Vector ZipHigh(Vector left, Vector right); // ZIP2 + + public static unsafe Vector ZipLow(Vector left, Vector right); // ZIP1 + + + // All patterns used by PTRUE. + public enum SveMaskPattern : byte + { + LargestPowerOf2 = 0, // The largest power of 2. + VectorCount1 = 1, // 1 element. + VectorCount2 = 2, // 2 elements. + VectorCount3 = 3, // 3 elements. + VectorCount4 = 4, // 4 elements. + VectorCount5 = 5, // 5 elements. + VectorCount6 = 6, // 6 elements. + VectorCount7 = 7, // 7 elements. + VectorCount8 = 8, // 8 elements. + VectorCount16 = 9, // 16 elements. + VectorCount32 = 10, // 32 elements. + VectorCount64 = 11, // 64 elements. + VectorCount128 = 12, // 128 elements. + VectorCount256 = 13, // 256 elements. + LargestMultipleOf4 = 29, // The largest multiple of 4. + LargestMultipleOf3 = 30, // The largest multiple of 3. + All = 31 // All available (implicitly a multiple of two). 
+ }; + + /// total method signatures: 60 + + + /// Optional Entries: + + public static unsafe Vector Bfloat16DotProduct(Vector addend, Vector left, bfloat16 right); // BFDOT // MOVPRFX + + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionLower(Vector op1, Vector op2, bfloat16 op3); // BFMLALB // MOVPRFX + + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionUpper(Vector op1, Vector op2, bfloat16 op3); // BFMLALT // MOVPRFX + + public static unsafe bfloat16 ConditionalExtractAfterLastActiveElement(Vector mask, bfloat16 defaultValue, Vector data); // CLASTA + + public static unsafe bfloat16 ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, bfloat16 defaultScalar, Vector data); // CLASTA + + public static unsafe bfloat16 ConditionalExtractLastActiveElement(Vector mask, bfloat16 defaultValue, Vector data); // CLASTB + + public static unsafe bfloat16 ConditionalExtractLastActiveElementAndReplicate(Vector mask, bfloat16 fallback, Vector data); // CLASTB + + /// total optional method signatures: 7 + +} + + +/// Full API +public abstract partial class SveBf16 : AdvSimd /// Feature: FEAT_BF16 +{ + /// Bfloat16DotProduct : BFloat16 dot product + + /// svfloat32_t svbfdot[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) : "BFDOT Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; BFDOT Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector Bfloat16DotProduct(Vector addend, Vector left, Vector right); + + + /// Bfloat16MatrixMultiplyAccumulate : BFloat16 matrix multiply-accumulate + + /// svfloat32_t svbfmmla[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) : "BFMMLA Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; BFMMLA Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector Bfloat16MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3); + + + /// Bfloat16MultiplyAddWideningToSinglePrecisionLower : BFloat16 multiply-add long to single-precision (bottom) + + /// svfloat32_t svbfmlalb[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) : "BFMLALB Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; BFMLALB Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionLower(Vector op1, Vector op2, Vector op3); + + /// svfloat32_t svbfmlalb_lane[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3, uint64_t imm_index) : "BFMLALB Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; BFMLALB Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// Bfloat16MultiplyAddWideningToSinglePrecisionUpper : BFloat16 multiply-add long to single-precision (top) + + /// svfloat32_t svbfmlalt[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) : "BFMLALT Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; BFMLALT Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionUpper(Vector op1, Vector op2, Vector op3); + + /// svfloat32_t svbfmlalt_lane[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3, uint64_t imm_index) : "BFMLALT Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; BFMLALT Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// ConcatenateEvenInt128FromTwoInputs : Concatenate even quadwords from two inputs + + /// svbfloat16_t 
svuzp1q[_bf16](svbfloat16_t op1, svbfloat16_t op2) : "UZP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); + + + /// ConcatenateOddInt128FromTwoInputs : Concatenate odd quadwords from two inputs + + /// svbfloat16_t svuzp2q[_bf16](svbfloat16_t op1, svbfloat16_t op2) : "UZP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); + + + /// ConditionalExtractAfterLastActiveElement : Conditionally extract element after last + + /// svbfloat16_t svclasta[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) : "CLASTA Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H" + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data); + + /// svbfloat16_t svclasta[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) : "CLASTA Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H" + /// bfloat16_t svclasta[_n_bf16](svbool_t pg, bfloat16_t fallback, svbfloat16_t data) : "CLASTA Wtied, Pg, Wtied, Zdata.H" or "CLASTA Htied, Pg, Htied, Zdata.H" + public static unsafe bfloat16 ConditionalExtractAfterLastActiveElement(Vector mask, bfloat16 defaultValues, Vector data); + + + /// ConditionalExtractAfterLastActiveElementAndReplicate : Conditionally extract element after last + + /// svbfloat16_t svclasta[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) : "CLASTA Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H" + public static unsafe Vector ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, Vector defaultScalar, Vector data); + + + /// ConditionalExtractLastActiveElement : Conditionally extract last element + + /// svbfloat16_t svclastb[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) : "CLASTB Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H" + public static unsafe Vector ConditionalExtractLastActiveElement(Vector mask, Vector defaultValue, Vector data); + + /// svbfloat16_t svclastb[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) : "CLASTB Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H" + /// bfloat16_t svclastb[_n_bf16](svbool_t pg, bfloat16_t fallback, svbfloat16_t data) : "CLASTB Wtied, Pg, Wtied, Zdata.H" or "CLASTB Htied, Pg, Htied, Zdata.H" + public static unsafe bfloat16 ConditionalExtractLastActiveElement(Vector mask, bfloat16 defaultValues, Vector data); + + + /// ConditionalExtractLastActiveElementAndReplicate : Conditionally extract last element + + /// svbfloat16_t svclastb[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) : "CLASTB Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H" + public static unsafe Vector ConditionalExtractLastActiveElementAndReplicate(Vector mask, Vector fallback, Vector data); + + + /// ConditionalSelect : Conditionally select elements + + /// svbfloat16_t svsel[_bf16](svbool_t pg, svbfloat16_t op1, svbfloat16_t op2) : "SEL Zresult.H, Pg, Zop1.H, Zop2.H" + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right); + + + /// ConvertToBFloat16 : Floating-point convert + + /// svbfloat16_t svcvt_bf16[_f32]_m(svbfloat16_t inactive, svbool_t pg, svfloat32_t 
op) : "BFCVT Ztied.H, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; BFCVT Zresult.H, Pg/M, Zop.S" + /// svbfloat16_t svcvt_bf16[_f32]_x(svbool_t pg, svfloat32_t op) : "BFCVT Ztied.H, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; BFCVT Zresult.H, Pg/M, Zop.S" + /// svbfloat16_t svcvt_bf16[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; BFCVT Zresult.H, Pg/M, Zop.S" + public static unsafe Vector ConvertToBFloat16(Vector value); + + + /// CreateFalseMaskBFloat16 : Set all predicate elements to false + + /// svbool_t svpfalse[_b]() : "PFALSE Presult.B" + public static unsafe Vector CreateFalseMaskBFloat16(); + + + /// CreateTrueMaskBFloat16 : Set predicate elements to true + + /// svbool_t svptrue_pat_b8(enum svpattern pattern) : "PTRUE Presult.B, pattern" + public static unsafe Vector CreateTrueMaskBFloat16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + + /// CreateWhileReadAfterWriteMask : While free of read-after-write conflicts + + /// svbool_t svwhilerw[_bf16](const bfloat16_t *op1, const bfloat16_t *op2) : "WHILERW Presult.H, Xop1, Xop2" + public static unsafe Vector CreateWhileReadAfterWriteMask(bfloat16* left, bfloat16* right); + + + /// CreateWhileWriteAfterReadMask : While free of write-after-read conflicts + + /// svbool_t svwhilewr[_bf16](const bfloat16_t *op1, const bfloat16_t *op2) : "WHILEWR Presult.H, Xop1, Xop2" + public static unsafe Vector CreateWhileWriteAfterReadMask(bfloat16* left, bfloat16* right); + + + /// DotProductBySelectedScalar : BFloat16 dot product + + /// svfloat32_t svbfdot_lane[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3, uint64_t imm_index) : "BFDOT Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; BFDOT Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + + /// DownConvertNarrowingUpper : Down convert and narrow (top) + + /// svbfloat16_t svcvtnt_bf16[_f32]_m(svbfloat16_t even, svbool_t pg, svfloat32_t op) : "BFCVTNT Ztied.H, Pg/M, Zop.S" + /// svbfloat16_t svcvtnt_bf16[_f32]_x(svbfloat16_t even, svbool_t pg, svfloat32_t op) : "BFCVTNT Ztied.H, Pg/M, Zop.S" + public static unsafe Vector DownConvertNarrowingUpper(Vector value); + + + /// DuplicateSelectedScalarToVector : Broadcast a scalar value + + /// svbfloat16_t svdup_lane[_bf16](svbfloat16_t data, uint16_t index) : "DUP Zresult.H, Zdata.H[index]" or "TBL Zresult.H, Zdata.H, Zindex.H" + /// svbfloat16_t svdupq_lane[_bf16](svbfloat16_t data, uint64_t index) : "DUP Zresult.Q, Zdata.Q[index]" or "TBL Zresult.D, Zdata.D, Zindices_d.D" + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); + + + /// ExtractAfterLastScalar : Extract element after last + + /// bfloat16_t svlasta[_bf16](svbool_t pg, svbfloat16_t op) : "LASTA Wresult, Pg, Zop.H" or "LASTA Hresult, Pg, Zop.H" + public static unsafe bfloat16 ExtractAfterLastScalar(Vector value); + + + /// ExtractAfterLastVector : Extract element after last + + /// bfloat16_t svlasta[_bf16](svbool_t pg, svbfloat16_t op) : "LASTA Wresult, Pg, Zop.H" or "LASTA Hresult, Pg, Zop.H" + public static unsafe Vector ExtractAfterLastVector(Vector value); + + + /// ExtractLastScalar : Extract last element + + /// bfloat16_t svlastb[_bf16](svbool_t pg, svbfloat16_t op) : "LASTB Wresult, Pg, Zop.H" or "LASTB Hresult, Pg, Zop.H" + public static unsafe bfloat16 ExtractLastScalar(Vector value); + + + /// ExtractLastVector : Extract last 
element + + /// bfloat16_t svlastb[_bf16](svbool_t pg, svbfloat16_t op) : "LASTB Wresult, Pg, Zop.H" or "LASTB Hresult, Pg, Zop.H" + public static unsafe Vector ExtractLastVector(Vector value); + + + /// ExtractVector : Extract vector from pair of vectors + + /// svbfloat16_t svext[_bf16](svbfloat16_t op1, svbfloat16_t op2, uint64_t imm3) : "EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 2" or "MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 2" + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index); + + + /// GetActiveElementCount : Count set predicate bits + + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) : "CNTP Xresult, Pg, Pop.B" + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); + + + /// InsertIntoShiftedVector : Insert scalar into shifted vector + + /// svbfloat16_t svinsr[_n_bf16](svbfloat16_t op1, bfloat16_t op2) : "INSR Ztied1.H, Wop2" or "INSR Ztied1.H, Hop2" + public static unsafe Vector InsertIntoShiftedVector(Vector left, bfloat16 right); + + + /// InterleaveEvenInt128FromTwoInputs : Interleave even quadwords from two inputs + + /// svbfloat16_t svtrn1q[_bf16](svbfloat16_t op1, svbfloat16_t op2) : "TRN1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); + + + /// InterleaveInt128FromHighHalvesOfTwoInputs : Interleave quadwords from high halves of two inputs + + /// svbfloat16_t svzip2q[_bf16](svbfloat16_t op1, svbfloat16_t op2) : "ZIP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); + + + /// InterleaveInt128FromLowHalvesOfTwoInputs : Interleave quadwords from low halves of two inputs + + /// svbfloat16_t svzip1q[_bf16](svbfloat16_t op1, svbfloat16_t op2) : "ZIP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); + + + /// InterleaveOddInt128FromTwoInputs : Interleave odd quadwords from two inputs + + /// svbfloat16_t svtrn2q[_bf16](svbfloat16_t op1, svbfloat16_t op2) : "TRN2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right); + + + /// LoadVector : Unextended load + + /// svbfloat16_t svld1[_bf16](svbool_t pg, const bfloat16_t *base) : "LD1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVector(Vector mask, bfloat16* address); + + + /// LoadVector128AndReplicateToVector : Load and replicate 128 bits of data + + /// svbfloat16_t svld1rq[_bf16](svbool_t pg, const bfloat16_t *base) : "LD1RQH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1RQH Zresult.H, Pg/Z, [Xarray, #index * 2]" or "LD1RQH Zresult.H, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, bfloat16* address); + + + /// LoadVector256AndReplicateToVector : Load and replicate 256 bits of data + + /// svbfloat16_t svld1ro[_bf16](svbool_t pg, const bfloat16_t *base) : "LD1ROH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1ROH Zresult.H, Pg/Z, [Xarray, #index * 2]" or "LD1ROH Zresult.H, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, bfloat16* address); + + + /// LoadVectorFirstFaulting : Unextended load, first-faulting + + /// svbfloat16_t svldff1[_bf16](svbool_t pg, const bfloat16_t *base) : "LDFF1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1H Zresult.H, Pg/Z, [Xbase, 
XZR, LSL #1]" + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, bfloat16* address); + + + /// LoadVectorNonFaulting : Unextended load, non-faulting + + /// svbfloat16_t svldnf1[_bf16](svbool_t pg, const bfloat16_t *base) : "LDNF1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonFaulting(bfloat16* address); + + + /// LoadVectorNonTemporal : Unextended load, non-temporal + + /// svbfloat16_t svldnt1[_bf16](svbool_t pg, const bfloat16_t *base) : "LDNT1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDNT1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonTemporal(Vector mask, bfloat16* address); + + + /// LoadVectorx2 : Load two-element tuples into two vectors + + /// svbfloat16x2_t svld2[_bf16](svbool_t pg, const bfloat16_t *base) : "LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, bfloat16* address); + + + /// LoadVectorx3 : Load three-element tuples into three vectors + + /// svbfloat16x3_t svld3[_bf16](svbool_t pg, const bfloat16_t *base) : "LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, bfloat16* address); + + + /// LoadVectorx4 : Load four-element tuples into four vectors + + /// svbfloat16x4_t svld4[_bf16](svbool_t pg, const bfloat16_t *base) : "LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, bfloat16* address); + + + /// PopCount : Count nonzero bits + + /// svuint16_t svcnt[_bf16]_m(svuint16_t inactive, svbool_t pg, svbfloat16_t op) : "CNT Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; CNT Zresult.H, Pg/M, Zop.H" + /// svuint16_t svcnt[_bf16]_x(svbool_t pg, svbfloat16_t op) : "CNT Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; CNT Zresult.H, Pg/M, Zop.H" + /// svuint16_t svcnt[_bf16]_z(svbool_t pg, svbfloat16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; CNT Zresult.H, Pg/M, Zop.H" + public static unsafe Vector PopCount(Vector value); + + + /// ReverseElement : Reverse all elements + + /// svbfloat16_t svrev[_bf16](svbfloat16_t op) : "REV Zresult.H, Zop.H" + public static unsafe Vector ReverseElement(Vector value); + + + /// Splice : Splice two vectors under predicate control + + /// svbfloat16_t svsplice[_bf16](svbool_t pg, svbfloat16_t op1, svbfloat16_t op2) : "SPLICE Ztied1.H, Pg, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SPLICE Zresult.H, Pg, Zresult.H, Zop2.H" + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); + + + /// Store : Non-truncating store + + /// void svst1[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16_t data) : "ST1H Zdata.H, Pg, [Xarray, Xindex, LSL #1]" or "ST1H Zdata.H, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, bfloat16* address, Vector data); + + /// void svst2[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16x2_t data) : "ST2H {Zdata0.H, Zdata1.H}, Pg, [Xarray, Xindex, LSL #1]" or "ST2H {Zdata0.H, Zdata1.H}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, bfloat16* address, (Vector Value1, Vector Value2) data); + + /// void svst3[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16x3_t data) : "ST3H {Zdata0.H - Zdata2.H}, Pg, [Xarray, Xindex, LSL 
#1]" or "ST3H {Zdata0.H - Zdata2.H}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, bfloat16* address, (Vector Value1, Vector Value2, Vector Value3) data); + + /// void svst4[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16x4_t data) : "ST4H {Zdata0.H - Zdata3.H}, Pg, [Xarray, Xindex, LSL #1]" or "ST4H {Zdata0.H - Zdata3.H}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, bfloat16* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); + + + /// StoreNonTemporal : Non-truncating store, non-temporal + + /// void svstnt1[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16_t data) : "STNT1H Zdata.H, Pg, [Xarray, Xindex, LSL #1]" or "STNT1H Zdata.H, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNonTemporal(Vector mask, bfloat16* address, Vector data); + + + /// TransposeEven : Interleave even elements from two inputs + + /// svbfloat16_t svtrn1[_bf16](svbfloat16_t op1, svbfloat16_t op2) : "TRN1 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector TransposeEven(Vector left, Vector right); + + + /// TransposeOdd : Interleave odd elements from two inputs + + /// svbfloat16_t svtrn2[_bf16](svbfloat16_t op1, svbfloat16_t op2) : "TRN2 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector TransposeOdd(Vector left, Vector right); + + + /// UnzipEven : Concatenate even elements from two inputs + + /// svbfloat16_t svuzp1[_bf16](svbfloat16_t op1, svbfloat16_t op2) : "UZP1 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector UnzipEven(Vector left, Vector right); + + + /// UnzipOdd : Concatenate odd elements from two inputs + + /// svbfloat16_t svuzp2[_bf16](svbfloat16_t op1, svbfloat16_t op2) : "UZP2 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector UnzipOdd(Vector left, Vector right); + + + /// VectorTableLookup : Table lookup in single-vector table + + /// svbfloat16_t svtbl[_bf16](svbfloat16_t data, svuint16_t indices) : "TBL Zresult.H, Zdata.H, Zindices.H" + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); + + /// svbfloat16_t svtbl2[_bf16](svbfloat16x2_t data, svuint16_t indices) : "TBL Zresult.H, {Zdata0.H, Zdata1.H}, Zindices.H" + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); + + + /// VectorTableLookupExtension : Table lookup in single-vector table (merging) + + /// svbfloat16_t svtbx[_bf16](svbfloat16_t fallback, svbfloat16_t data, svuint16_t indices) : "TBX Ztied.H, Zdata.H, Zindices.H" + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); + + + /// ZipHigh : Interleave elements from high halves of two inputs + + /// svbfloat16_t svzip2[_bf16](svbfloat16_t op1, svbfloat16_t op2) : "ZIP2 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector ZipHigh(Vector left, Vector right); + + + /// ZipLow : Interleave elements from low halves of two inputs + + /// svbfloat16_t svzip1[_bf16](svbfloat16_t op1, svbfloat16_t op2) : "ZIP1 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector ZipLow(Vector left, Vector right); + + + /// total method signatures: 60 + /// total method names: 53 +} + + /// Optional Entries: + /// public static unsafe Vector Bfloat16DotProduct(Vector addend, Vector left, bfloat16 right); // svbfdot[_n_f32] + /// public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionLower(Vector op1, Vector op2, bfloat16 op3); // svbfmlalb[_n_f32] + /// public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionUpper(Vector op1, Vector op2, 
bfloat16 op3); // svbfmlalt[_n_f32] + /// public static unsafe bfloat16 ConditionalExtractAfterLastActiveElement(Vector mask, bfloat16 defaultValue, Vector data); // svclasta[_n_bf16] + /// public static unsafe bfloat16 ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, bfloat16 defaultScalar, Vector data); // svclasta[_n_bf16] + /// public static unsafe bfloat16 ConditionalExtractLastActiveElement(Vector mask, bfloat16 defaultValue, Vector data); // svclastb[_n_bf16] + /// public static unsafe bfloat16 ConditionalExtractLastActiveElementAndReplicate(Vector mask, bfloat16 fallback, Vector data); // svclastb[_n_bf16] + /// Total Maybe: 7 + + /// Rejected: + /// public static unsafe ulong CountElementsInAFullVector(Vector value); // svlen[_bf16] + /// public static unsafe Vector CreateTrueMaskBFloat16(); // svptrue_b8 + /// public static unsafe Vector DuplicateSelectedScalarToVector(bfloat16 value); // svdup[_n]_bf16 or svdup[_n]_bf16_m or svdup[_n]_bf16_x or svdup[_n]_bf16_z + /// public static unsafe Vector LoadVector(Vector mask, bfloat16* address, long vnum); // svld1_vnum[_bf16] + /// public static unsafe Vector LoadVectorFirstFaulting(Vector mask, bfloat16* address, long vnum); // svldff1_vnum[_bf16] + /// public static unsafe Vector LoadVectorNonFaulting(bfloat16* address, long vnum); // svldnf1_vnum[_bf16] + /// public static unsafe Vector LoadVectorNonTemporal(Vector mask, bfloat16* address, long vnum); // svldnt1_vnum[_bf16] + /// public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, bfloat16* address, long vnum); // svld2_vnum[_bf16] + /// public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, bfloat16* address, long vnum); // svld3_vnum[_bf16] + /// public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, bfloat16* address, long vnum); // svld4_vnum[_bf16] + /// public static unsafe void Store(Vector mask, bfloat16* base, long vnum, Vector data); // svst1_vnum[_bf16] + /// public static unsafe void Store(Vector mask, bfloat16* base, long vnum, (Vector data1, Vector data2)); // svst2_vnum[_bf16] + /// public static unsafe void Store(Vector mask, bfloat16* base, long vnum, (Vector data1, Vector data2, Vector data3)); // svst3_vnum[_bf16] + /// public static unsafe void Store(Vector mask, bfloat16* base, long vnum, (Vector data1, Vector data2, Vector data3, Vector data4)); // svst4_vnum[_bf16] + /// public static unsafe void StoreNonTemporal(Vector mask, bfloat16* base, long vnum, Vector data); // svstnt1_vnum[_bf16] + /// Total Rejected: 15 + + /// Total ACLE covered across API: 93 + diff --git a/sve_api/out_api/apiraw_FEAT_F32MM__.cs b/sve_api/out_api/apiraw_FEAT_F32MM__.cs new file mode 100644 index 0000000000000..d65af21cc403e --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_F32MM__.cs @@ -0,0 +1,29 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class SveF32mm : AdvSimd /// Feature: FEAT_F32MM +{ + + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3); // FMMLA // MOVPRFX + + /// total method signatures: 1 + +} + + +/// Full API +public abstract partial class SveF32mm : AdvSimd /// Feature: FEAT_F32MM +{ + /// MatrixMultiplyAccumulate : Matrix multiply-accumulate + + /// svfloat32_t svmmla[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) : "FMMLA Ztied1.S, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; FMMLA Zresult.S, Zop2.S, Zop3.S" + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3); + + + 
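+ /// Example (editor's illustrative sketch, not generated output): a minimal use of the
+ /// signature above, assuming the usual IsSupported guard and caller-supplied
+ /// Vector<float> operands named left and right:
+ ///
+ ///     if (SveF32mm.IsSupported)
+ ///     {
+ ///         // FMMLA treats each 128-bit segment as a 2x2 float matrix and
+ ///         // accumulates the per-segment product: acc += left * right.
+ ///         Vector<float> acc = Vector<float>.Zero;
+ ///         acc = SveF32mm.MatrixMultiplyAccumulate(acc, left, right);
+ ///     }
+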
/// total method signatures: 1 + /// total method names: 1 +} + + + /// Total ACLE covered across API: 1 + diff --git a/sve_api/out_api/apiraw_FEAT_F64MM__.cs b/sve_api/out_api/apiraw_FEAT_F64MM__.cs new file mode 100644 index 0000000000000..cf1dc70deccd5 --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_F64MM__.cs @@ -0,0 +1,281 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class SveF64mm : AdvSimd /// Feature: FEAT_F64MM +{ + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); // UZP1 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); // UZP2 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); // TRN1 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); // ZIP2 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); // ZIP1 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right); // TRN2 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, T* address); // LD1ROW or LD1ROD or LD1ROB or LD1ROH + + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3); // FMMLA // MOVPRFX + + /// total method signatures: 8 + +} + + +/// Full API +public abstract partial class SveF64mm : AdvSimd /// Feature: FEAT_F64MM +{ + /// ConcatenateEvenInt128FromTwoInputs : Concatenate even quadwords from two inputs + + /// svfloat32_t svuzp1q[_f32](svfloat32_t op1, svfloat32_t op2) : "UZP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svfloat64_t svuzp1q[_f64](svfloat64_t op1, svfloat64_t op2) : "UZP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svint8_t svuzp1q[_s8](svint8_t op1, svint8_t op2) : "UZP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svint16_t svuzp1q[_s16](svint16_t op1, svint16_t op2) : "UZP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svint32_t svuzp1q[_s32](svint32_t op1, svint32_t op2) : "UZP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svint64_t svuzp1q[_s64](svint64_t op1, svint64_t op2) : "UZP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svuint8_t svuzp1q[_u8](svuint8_t op1, svuint8_t op2) : "UZP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svuint16_t svuzp1q[_u16](svuint16_t op1, svuint16_t op2) : "UZP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe 
Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svuint32_t svuzp1q[_u32](svuint32_t op1, svuint32_t op2) : "UZP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svuint64_t svuzp1q[_u64](svuint64_t op1, svuint64_t op2) : "UZP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); + + + /// ConcatenateOddInt128FromTwoInputs : Concatenate odd quadwords from two inputs + + /// svfloat32_t svuzp2q[_f32](svfloat32_t op1, svfloat32_t op2) : "UZP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); + + /// svfloat64_t svuzp2q[_f64](svfloat64_t op1, svfloat64_t op2) : "UZP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); + + /// svint8_t svuzp2q[_s8](svint8_t op1, svint8_t op2) : "UZP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); + + /// svint16_t svuzp2q[_s16](svint16_t op1, svint16_t op2) : "UZP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); + + /// svint32_t svuzp2q[_s32](svint32_t op1, svint32_t op2) : "UZP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); + + /// svint64_t svuzp2q[_s64](svint64_t op1, svint64_t op2) : "UZP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); + + /// svuint8_t svuzp2q[_u8](svuint8_t op1, svuint8_t op2) : "UZP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); + + /// svuint16_t svuzp2q[_u16](svuint16_t op1, svuint16_t op2) : "UZP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); + + /// svuint32_t svuzp2q[_u32](svuint32_t op1, svuint32_t op2) : "UZP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); + + /// svuint64_t svuzp2q[_u64](svuint64_t op1, svuint64_t op2) : "UZP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); + + + /// InterleaveEvenInt128FromTwoInputs : Interleave even quadwords from two inputs + + /// svfloat32_t svtrn1q[_f32](svfloat32_t op1, svfloat32_t op2) : "TRN1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svfloat64_t svtrn1q[_f64](svfloat64_t op1, svfloat64_t op2) : "TRN1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svint8_t svtrn1q[_s8](svint8_t op1, svint8_t op2) : "TRN1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svint16_t svtrn1q[_s16](svint16_t op1, svint16_t op2) : "TRN1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svint32_t svtrn1q[_s32](svint32_t op1, svint32_t op2) : "TRN1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svint64_t svtrn1q[_s64](svint64_t op1, svint64_t op2) : 
"TRN1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svuint8_t svtrn1q[_u8](svuint8_t op1, svuint8_t op2) : "TRN1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svuint16_t svtrn1q[_u16](svuint16_t op1, svuint16_t op2) : "TRN1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svuint32_t svtrn1q[_u32](svuint32_t op1, svuint32_t op2) : "TRN1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); + + /// svuint64_t svtrn1q[_u64](svuint64_t op1, svuint64_t op2) : "TRN1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); + + + /// InterleaveInt128FromHighHalvesOfTwoInputs : Interleave quadwords from high halves of two inputs + + /// svfloat32_t svzip2q[_f32](svfloat32_t op1, svfloat32_t op2) : "ZIP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); + + /// svfloat64_t svzip2q[_f64](svfloat64_t op1, svfloat64_t op2) : "ZIP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); + + /// svint8_t svzip2q[_s8](svint8_t op1, svint8_t op2) : "ZIP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); + + /// svint16_t svzip2q[_s16](svint16_t op1, svint16_t op2) : "ZIP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); + + /// svint32_t svzip2q[_s32](svint32_t op1, svint32_t op2) : "ZIP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); + + /// svint64_t svzip2q[_s64](svint64_t op1, svint64_t op2) : "ZIP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); + + /// svuint8_t svzip2q[_u8](svuint8_t op1, svuint8_t op2) : "ZIP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); + + /// svuint16_t svzip2q[_u16](svuint16_t op1, svuint16_t op2) : "ZIP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); + + /// svuint32_t svzip2q[_u32](svuint32_t op1, svuint32_t op2) : "ZIP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); + + /// svuint64_t svzip2q[_u64](svuint64_t op1, svuint64_t op2) : "ZIP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); + + + /// InterleaveInt128FromLowHalvesOfTwoInputs : Interleave quadwords from low halves of two inputs + + /// svfloat32_t svzip1q[_f32](svfloat32_t op1, svfloat32_t op2) : "ZIP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); + + /// svfloat64_t svzip1q[_f64](svfloat64_t op1, svfloat64_t op2) : "ZIP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); + + /// svint8_t svzip1q[_s8](svint8_t op1, svint8_t op2) : "ZIP1 
Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); + + /// svint16_t svzip1q[_s16](svint16_t op1, svint16_t op2) : "ZIP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); + + /// svint32_t svzip1q[_s32](svint32_t op1, svint32_t op2) : "ZIP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); + + /// svint64_t svzip1q[_s64](svint64_t op1, svint64_t op2) : "ZIP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); + + /// svuint8_t svzip1q[_u8](svuint8_t op1, svuint8_t op2) : "ZIP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); + + /// svuint16_t svzip1q[_u16](svuint16_t op1, svuint16_t op2) : "ZIP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); + + /// svuint32_t svzip1q[_u32](svuint32_t op1, svuint32_t op2) : "ZIP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); + + /// svuint64_t svzip1q[_u64](svuint64_t op1, svuint64_t op2) : "ZIP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); + + + /// InterleaveOddInt128FromTwoInputs : Interleave odd quadwords from two inputs + + /// svfloat32_t svtrn2q[_f32](svfloat32_t op1, svfloat32_t op2) : "TRN2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right); + + /// svfloat64_t svtrn2q[_f64](svfloat64_t op1, svfloat64_t op2) : "TRN2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right); + + /// svint8_t svtrn2q[_s8](svint8_t op1, svint8_t op2) : "TRN2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right); + + /// svint16_t svtrn2q[_s16](svint16_t op1, svint16_t op2) : "TRN2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right); + + /// svint32_t svtrn2q[_s32](svint32_t op1, svint32_t op2) : "TRN2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right); + + /// svint64_t svtrn2q[_s64](svint64_t op1, svint64_t op2) : "TRN2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right); + + /// svuint8_t svtrn2q[_u8](svuint8_t op1, svuint8_t op2) : "TRN2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right); + + /// svuint16_t svtrn2q[_u16](svuint16_t op1, svuint16_t op2) : "TRN2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right); + + /// svuint32_t svtrn2q[_u32](svuint32_t op1, svuint32_t op2) : "TRN2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right); + + /// svuint64_t svtrn2q[_u64](svuint64_t op1, svuint64_t op2) : "TRN2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right); + + + /// LoadVector256AndReplicateToVector : Load and replicate 
256 bits of data + + /// svfloat32_t svld1ro[_f32](svbool_t pg, const float32_t *base) : "LD1ROW Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD1ROW Zresult.S, Pg/Z, [Xarray, #index * 4]" or "LD1ROW Zresult.S, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, float* address); + + /// svfloat64_t svld1ro[_f64](svbool_t pg, const float64_t *base) : "LD1ROD Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD1ROD Zresult.D, Pg/Z, [Xarray, #index * 8]" or "LD1ROD Zresult.D, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, double* address); + + /// svint8_t svld1ro[_s8](svbool_t pg, const int8_t *base) : "LD1ROB Zresult.B, Pg/Z, [Xarray, Xindex]" or "LD1ROB Zresult.B, Pg/Z, [Xarray, #index]" or "LD1ROB Zresult.B, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, sbyte* address); + + /// svint16_t svld1ro[_s16](svbool_t pg, const int16_t *base) : "LD1ROH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1ROH Zresult.H, Pg/Z, [Xarray, #index * 2]" or "LD1ROH Zresult.H, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, short* address); + + /// svint32_t svld1ro[_s32](svbool_t pg, const int32_t *base) : "LD1ROW Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD1ROW Zresult.S, Pg/Z, [Xarray, #index * 4]" or "LD1ROW Zresult.S, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, int* address); + + /// svint64_t svld1ro[_s64](svbool_t pg, const int64_t *base) : "LD1ROD Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD1ROD Zresult.D, Pg/Z, [Xarray, #index * 8]" or "LD1ROD Zresult.D, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, long* address); + + /// svuint8_t svld1ro[_u8](svbool_t pg, const uint8_t *base) : "LD1ROB Zresult.B, Pg/Z, [Xarray, Xindex]" or "LD1ROB Zresult.B, Pg/Z, [Xarray, #index]" or "LD1ROB Zresult.B, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, byte* address); + + /// svuint16_t svld1ro[_u16](svbool_t pg, const uint16_t *base) : "LD1ROH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1ROH Zresult.H, Pg/Z, [Xarray, #index * 2]" or "LD1ROH Zresult.H, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, ushort* address); + + /// svuint32_t svld1ro[_u32](svbool_t pg, const uint32_t *base) : "LD1ROW Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD1ROW Zresult.S, Pg/Z, [Xarray, #index * 4]" or "LD1ROW Zresult.S, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, uint* address); + + /// svuint64_t svld1ro[_u64](svbool_t pg, const uint64_t *base) : "LD1ROD Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD1ROD Zresult.D, Pg/Z, [Xarray, #index * 8]" or "LD1ROD Zresult.D, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, ulong* address); + + + /// MatrixMultiplyAccumulate : Matrix multiply-accumulate + + /// svfloat64_t svmmla[_f64](svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) : "FMMLA Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; FMMLA Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3); + + + /// total method signatures: 71 + /// total method names: 8 +} + + + /// Total ACLE covered across API: 71 + diff --git a/sve_api/out_api/apiraw_FEAT_FP16__.cs 
b/sve_api/out_api/apiraw_FEAT_FP16__.cs new file mode 100644 index 0000000000000..47447ec3643ad --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_FP16__.cs @@ -0,0 +1,1321 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class SveFp16 : AdvSimd /// Feature: FEAT_FP16 +{ + + public static unsafe Vector Abs(Vector value); // FABS // predicated, MOVPRFX + + public static unsafe Vector AbsoluteCompareGreaterThan(Vector left, Vector right); // FACGT // predicated + + public static unsafe Vector AbsoluteCompareGreaterThanOrEqual(Vector left, Vector right); // FACGE // predicated + + public static unsafe Vector AbsoluteCompareLessThan(Vector left, Vector right); // FACGT // predicated + + public static unsafe Vector AbsoluteCompareLessThanOrEqual(Vector left, Vector right); // FACGE // predicated + + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); // FABD // predicated, MOVPRFX + + public static unsafe Vector Add(Vector left, Vector right); // FADD // predicated, MOVPRFX + + public static unsafe Vector AddAcross(Vector value); // FADDV // predicated + + public static unsafe Vector AddPairwise(Vector left, Vector right); // FADDP // predicated, MOVPRFX + + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); // FCADD // predicated, MOVPRFX + + public static unsafe Vector AddSequentialAcross(Vector initial, Vector value); // FADDA // predicated + + public static unsafe Vector CompareEqual(Vector left, Vector right); // FCMEQ // predicated + + public static unsafe Vector CompareGreaterThan(Vector left, Vector right); // FCMGT // predicated + + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right); // FCMGE // predicated + + public static unsafe Vector CompareLessThan(Vector left, Vector right); // FCMGT // predicated + + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right); // FCMGE // predicated + + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right); // FCMNE // predicated + + public static unsafe Vector CompareUnordered(Vector left, Vector right); // FCMUO // predicated + + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); // UZP1 + + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); // UZP2 + + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data); // CLASTA // MOVPRFX + + public static unsafe half ConditionalExtractAfterLastActiveElement(Vector mask, half defaultValues, Vector data); // CLASTA // MOVPRFX + + public static unsafe Vector ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, Vector defaultScalar, Vector data); // CLASTA // MOVPRFX + + public static unsafe Vector ConditionalExtractLastActiveElement(Vector mask, Vector defaultValue, Vector data); // CLASTB // MOVPRFX + + public static unsafe half ConditionalExtractLastActiveElement(Vector mask, half defaultValues, Vector data); // CLASTB // MOVPRFX + + public static unsafe Vector ConditionalExtractLastActiveElementAndReplicate(Vector mask, Vector fallback, Vector data); // CLASTB // MOVPRFX + + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right); // SEL + + public static unsafe Vector ConvertToDouble(Vector value); // FCVT // predicated, MOVPRFX + + /// T: [half, float], [half, double], [half, short], [half, int], [half, long], [half, ushort], [half, 
uint], [half, ulong] + public static unsafe Vector ConvertToHalf(Vector value); // FCVT or SCVTF or UCVTF // predicated, MOVPRFX + + public static unsafe Vector ConvertToInt16(Vector value); // FCVTZS // predicated, MOVPRFX + + public static unsafe Vector ConvertToInt32(Vector value); // FCVTZS // predicated, MOVPRFX + + public static unsafe Vector ConvertToInt64(Vector value); // FCVTZS // predicated, MOVPRFX + + public static unsafe Vector ConvertToSingle(Vector value); // FCVT // predicated, MOVPRFX + + public static unsafe Vector ConvertToUInt16(Vector value); // FCVTZU // predicated, MOVPRFX + + public static unsafe Vector ConvertToUInt32(Vector value); // FCVTZU // predicated, MOVPRFX + + public static unsafe Vector ConvertToUInt64(Vector value); // FCVTZU // predicated, MOVPRFX + + public static unsafe Vector CreateFalseMaskHalf(); // PFALSE + + public static unsafe Vector CreateTrueMaskHalf([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE + + public static unsafe Vector CreateWhileReadAfterWriteMask(half* left, half* right); // WHILERW + + public static unsafe Vector CreateWhileWriteAfterReadMask(half* left, half* right); // WHILEWR + + public static unsafe Vector Divide(Vector left, Vector right); // FDIV or FDIVR // predicated, MOVPRFX + + public static unsafe Vector DownConvertNarrowingUpper(Vector value); // FCVTNT // predicated + + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); // DUP or TBL + + public static unsafe half ExtractAfterLastScalar(Vector value); // LASTA // predicated + + public static unsafe Vector ExtractAfterLastVector(Vector value); // LASTA // predicated + + public static unsafe half ExtractLastScalar(Vector value); // LASTB // predicated + + public static unsafe Vector ExtractLastVector(Vector value); // LASTB // predicated + + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index); // EXT // MOVPRFX + + public static unsafe Vector FloatingPointExponentialAccelerator(Vector value); // FEXPA + + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right); // FMLA or FMAD // predicated, MOVPRFX + + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // FMLA // MOVPRFX + + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right); // FNMLA or FNMAD // predicated, MOVPRFX + + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right); // FMLS or FMSB // predicated, MOVPRFX + + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // FMLS // MOVPRFX + + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right); // FNMLS or FNMSB // predicated, MOVPRFX + + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); // CNTP + + public static unsafe Vector InsertIntoShiftedVector(Vector left, half right); // INSR + + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); // TRN1 + + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); // ZIP2 + + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); // ZIP1 + + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector 
right); // TRN2 + + public static unsafe Vector LoadVector(Vector mask, half* address); // LD1H + + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, half* address); // LD1RQH + + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, half* address); // LD1ROH + + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, half* address); // LDFF1H + + public static unsafe Vector LoadVectorNonFaulting(half* address); // LDNF1H // predicated + + public static unsafe Vector LoadVectorNonTemporal(Vector mask, half* address); // LDNT1H + + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, half* address); // LD2H + + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, half* address); // LD3H + + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, half* address); // LD4H + + public static unsafe Vector Log2(Vector value); // FLOGB // predicated, MOVPRFX + + public static unsafe Vector Max(Vector left, Vector right); // FMAX // predicated, MOVPRFX + + public static unsafe Vector MaxAcross(Vector value); // FMAXV // predicated + + public static unsafe Vector MaxNumber(Vector left, Vector right); // FMAXNM // predicated, MOVPRFX + + public static unsafe Vector MaxNumberAcross(Vector value); // FMAXNMV // predicated + + public static unsafe Vector MaxNumberPairwise(Vector left, Vector right); // FMAXNMP // predicated, MOVPRFX + + public static unsafe Vector MaxPairwise(Vector left, Vector right); // FMAXP // predicated, MOVPRFX + + public static unsafe Vector Min(Vector left, Vector right); // FMIN // predicated, MOVPRFX + + public static unsafe Vector MinAcross(Vector value); // FMINV // predicated + + public static unsafe Vector MinNumber(Vector left, Vector right); // FMINNM // predicated, MOVPRFX + + public static unsafe Vector MinNumberAcross(Vector value); // FMINNMV // predicated + + public static unsafe Vector MinNumberPairwise(Vector left, Vector right); // FMINNMP // predicated, MOVPRFX + + public static unsafe Vector MinPairwise(Vector left, Vector right); // FMINP // predicated, MOVPRFX + + public static unsafe Vector Multiply(Vector left, Vector right); // FMUL // predicated, MOVPRFX + + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); // FCMLA // predicated, MOVPRFX + + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation); // FCMLA // MOVPRFX + + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3); // FMLALB // MOVPRFX + + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); // FMLALB // MOVPRFX + + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3); // FMLALT // MOVPRFX + + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); // FMLALT // MOVPRFX + + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); // FMUL + + public static unsafe Vector MultiplyExtended(Vector left, Vector right); // FMULX // predicated, MOVPRFX + + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3); // FMLSLB // MOVPRFX + + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector 
op3, ulong imm_index); // FMLSLB // MOVPRFX + + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3); // FMLSLT // MOVPRFX + + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); // FMLSLT // MOVPRFX + + public static unsafe Vector Negate(Vector value); // FNEG // predicated, MOVPRFX + + public static unsafe Vector PopCount(Vector value); // CNT // predicated, MOVPRFX + + public static unsafe Vector ReciprocalEstimate(Vector value); // FRECPE + + public static unsafe Vector ReciprocalExponent(Vector value); // FRECPX // predicated, MOVPRFX + + public static unsafe Vector ReciprocalSqrtEstimate(Vector value); // FRSQRTE + + public static unsafe Vector ReciprocalSqrtStep(Vector left, Vector right); // FRSQRTS + + public static unsafe Vector ReciprocalStep(Vector left, Vector right); // FRECPS + + public static unsafe Vector ReverseElement(Vector value); // REV + + public static unsafe Vector RoundAwayFromZero(Vector value); // FRINTA // predicated, MOVPRFX + + public static unsafe Vector RoundToNearest(Vector value); // FRINTN // predicated, MOVPRFX + + public static unsafe Vector RoundToNegativeInfinity(Vector value); // FRINTM // predicated, MOVPRFX + + public static unsafe Vector RoundToPositiveInfinity(Vector value); // FRINTP // predicated, MOVPRFX + + public static unsafe Vector RoundToZero(Vector value); // FRINTZ // predicated, MOVPRFX + + public static unsafe Vector Scale(Vector left, Vector right); // FSCALE // predicated, MOVPRFX + + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); // SPLICE // MOVPRFX + + public static unsafe Vector Sqrt(Vector value); // FSQRT // predicated, MOVPRFX + + public static unsafe void Store(Vector mask, half* address, Vector data); // ST1H + + public static unsafe void Store(Vector mask, half* address, (Vector Value1, Vector Value2) data); // ST2H + + public static unsafe void Store(Vector mask, half* address, (Vector Value1, Vector Value2, Vector Value3) data); // ST3H + + public static unsafe void Store(Vector mask, half* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); // ST4H + + public static unsafe void StoreNonTemporal(Vector mask, half* address, Vector data); // STNT1H + + public static unsafe Vector Subtract(Vector left, Vector right); // FSUB or FSUBR // predicated, MOVPRFX + + public static unsafe Vector TransposeEven(Vector left, Vector right); // TRN1 + + public static unsafe Vector TransposeOdd(Vector left, Vector right); // TRN2 + + public static unsafe Vector TrigonometricMultiplyAddCoefficient(Vector left, Vector right, [ConstantExpected] byte control); // FTMAD // MOVPRFX + + public static unsafe Vector TrigonometricSelectCoefficient(Vector value, Vector selector); // FTSSEL + + public static unsafe Vector TrigonometricStartingValue(Vector value, Vector sign); // FTSMUL + + public static unsafe Vector UnzipEven(Vector left, Vector right); // UZP1 + + public static unsafe Vector UnzipOdd(Vector left, Vector right); // UZP2 + + public static unsafe Vector UpConvertWideningUpper(Vector value); // FCVTLT // predicated + + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); // TBL + + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); // TBL + + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); // TBX + + public static unsafe Vector ZipHigh(Vector left, Vector right); 
// ZIP2
+
+  public static unsafe Vector ZipLow(Vector left, Vector right); // ZIP1
+
+
+  // All patterns used by PTRUE.
+  public enum SveMaskPattern : byte
+  {
+    LargestPowerOf2 = 0,     // The largest power of 2.
+    VectorCount1 = 1,        // 1 element.
+    VectorCount2 = 2,        // 2 elements.
+    VectorCount3 = 3,        // 3 elements.
+    VectorCount4 = 4,        // 4 elements.
+    VectorCount5 = 5,        // 5 elements.
+    VectorCount6 = 6,        // 6 elements.
+    VectorCount7 = 7,        // 7 elements.
+    VectorCount8 = 8,        // 8 elements.
+    VectorCount16 = 9,       // 16 elements.
+    VectorCount32 = 10,      // 32 elements.
+    VectorCount64 = 11,      // 64 elements.
+    VectorCount128 = 12,     // 128 elements.
+    VectorCount256 = 13,     // 256 elements.
+    LargestMultipleOf4 = 29, // The largest multiple of 4.
+    LargestMultipleOf3 = 30, // The largest multiple of 3.
+    All = 31                 // All available (implicitly a multiple of two).
+  };
+
+  /// total method signatures: 131
+
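+
+  // Illustrative usage sketch (not part of the proposed surface): how an
+  // SveMaskPattern value drives predicate creation via the CreateTrueMaskHalf /
+  // CreateFalseMaskHalf entries listed in the Full API below. Explicit generic
+  // arguments (Vector<half>) are assumed here purely for readability.
+  //
+  //   // Predicate covering at most the first 8 half-precision lanes:
+  //   Vector<half> first8 = SveFp16.CreateTrueMaskHalf(SveMaskPattern.VectorCount8);
+  //   // Predicate covering every available lane (the default):
+  //   Vector<half> all = SveFp16.CreateTrueMaskHalf(SveMaskPattern.All);
+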
+
+  /// Optional Entries:
+
+  public static unsafe Vector AbsoluteCompareGreaterThan(Vector left, half right); // FACGT // predicated
+
+  public static unsafe Vector AbsoluteCompareGreaterThanOrEqual(Vector left, half right); // FACGE // predicated
+
+  public static unsafe Vector AbsoluteCompareLessThan(Vector left, half right); // FACGT // predicated
+
+  public static unsafe Vector AbsoluteCompareLessThanOrEqual(Vector left, half right); // FACGE // predicated
+
+  public static unsafe Vector AbsoluteDifference(Vector left, half right); // FABD // predicated, MOVPRFX
+
+  public static unsafe Vector Add(Vector left, half right); // FADD or FSUB // predicated, MOVPRFX
+
+  public static unsafe Vector CompareEqual(Vector left, half right); // FCMEQ // predicated
+
+  public static unsafe Vector CompareGreaterThan(Vector left, half right); // FCMGT // predicated
+
+  public static unsafe Vector CompareGreaterThanOrEqual(Vector left, half right); // FCMGE // predicated
+
+  public static unsafe Vector CompareLessThan(Vector left, half right); // FCMLT or FCMGT // predicated
+
+  public static unsafe Vector CompareLessThanOrEqual(Vector left, half right); // FCMLE or FCMGE // predicated
+
+  public static unsafe Vector CompareNotEqualTo(Vector left, half right); // FCMNE // predicated
+
+  public static unsafe Vector CompareUnordered(Vector left, half right); // FCMUO // predicated
+
+  public static unsafe half ConditionalExtractAfterLastActiveElement(Vector mask, half defaultValue, Vector data); // CLASTA
+
+  public static unsafe half ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, half defaultScalar, Vector data); // CLASTA
+
+  public static unsafe half ConditionalExtractLastActiveElement(Vector mask, half defaultValue, Vector data); // CLASTB
+
+  public static unsafe half ConditionalExtractLastActiveElementAndReplicate(Vector mask, half fallback, Vector data); // CLASTB
+
+  public static unsafe Vector Divide(Vector left, half right); // FDIV or FDIVR // predicated, MOVPRFX
+
+  public static unsafe Vector Max(Vector left, half right); // FMAX // predicated, MOVPRFX
+
+  public static unsafe Vector MaxNumber(Vector left, half right); // FMAXNM // predicated, MOVPRFX
+
+  public static unsafe Vector Min(Vector left, half right); // FMIN // predicated, MOVPRFX
+
+  public static unsafe Vector MinNumber(Vector left, half right); // FMINNM // predicated, MOVPRFX
+
+  public static unsafe Vector Multiply(Vector left, half right); // FMUL // predicated, MOVPRFX
+
+  public static unsafe Vector MultiplyAdd(Vector addend, Vector left, half right); // FMLA or FMAD // predicated, MOVPRFX
+
+  public static unsafe Vector MultiplyAddNegated(Vector addend, Vector left, half right); // FNMLA or FNMAD // predicated, MOVPRFX
+
+  public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, half op3); // FMLALB // MOVPRFX
+
+  public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, half op3); // FMLALT // MOVPRFX
+
+  public static unsafe Vector MultiplyExtended(Vector left, half right); // FMULX // predicated, MOVPRFX
+
+  public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, half right); // FMLS or FMSB // predicated, MOVPRFX
+
+  public static unsafe Vector MultiplySubtractNegated(Vector minuend, Vector left, half right); // FNMLS or FNMSB // predicated, MOVPRFX
+
+  public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, half op3); // FMLSLB // MOVPRFX
+
+  public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, half op3); // FMLSLT // MOVPRFX
+
+  public static unsafe Vector Subtract(Vector left, half right); // FSUB or FADD or FSUBR // predicated, MOVPRFX
+
+  /// total optional method signatures: 33
+
+}
+
+
+/// Full API
+public abstract partial class SveFp16 : AdvSimd /// Feature: FEAT_FP16
+{
+  /// Abs : Absolute value
+
+  /// svfloat16_t svabs[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) : "FABS Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FABS Zresult.H, Pg/M, Zop.H"
+  /// svfloat16_t svabs[_f16]_x(svbool_t pg, svfloat16_t op) : "FABS Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FABS Zresult.H, Pg/M, Zop.H"
+  /// svfloat16_t svabs[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; FABS Zresult.H, Pg/M, Zop.H"
+  public static unsafe Vector Abs(Vector value);
+
+
+  /// AbsoluteCompareGreaterThan : Absolute compare greater than
+
+  /// svbool_t svacgt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FACGT Presult.H, Pg/Z, Zop1.H, Zop2.H"
+  public static unsafe Vector AbsoluteCompareGreaterThan(Vector left, Vector right);
+
+
+  /// AbsoluteCompareGreaterThanOrEqual : Absolute compare greater than or equal to
+
+  /// svbool_t svacge[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FACGE Presult.H, Pg/Z, Zop1.H, Zop2.H"
+  public static unsafe Vector AbsoluteCompareGreaterThanOrEqual(Vector left, Vector right);
+
+
+  /// AbsoluteCompareLessThan : Absolute compare less than
+
+  /// svbool_t svaclt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FACGT Presult.H, Pg/Z, Zop2.H, Zop1.H"
+  public static unsafe Vector AbsoluteCompareLessThan(Vector left, Vector right);
+
+
+  /// AbsoluteCompareLessThanOrEqual : Absolute compare less than or equal to
+
+  /// svbool_t svacle[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FACGE Presult.H, Pg/Z, Zop2.H, Zop1.H"
+  public static unsafe Vector AbsoluteCompareLessThanOrEqual(Vector left, Vector right);
+
+
+  /// AbsoluteDifference : Absolute difference
+
+  /// svfloat16_t svabd[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FABD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FABD Zresult.H, Pg/M, Zresult.H, Zop2.H"
+  /// svfloat16_t svabd[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FABD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "FABD Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; FABD Zresult.H, Pg/M, Zresult.H, Zop2.H"
+  /// svfloat16_t svabd[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FABD Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; FABD Zresult.H, Pg/M, Zresult.H,
Zop1.H" + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); + + + /// Add : Add + + /// svfloat16_t svadd[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svadd[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "FADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "FADD Zresult.H, Zop1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svadd[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FADD Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; FADD Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Add(Vector left, Vector right); + + + /// AddAcross : Add reduction + + /// float16_t svaddv[_f16](svbool_t pg, svfloat16_t op) : "FADDV Hresult, Pg, Zop.H" + public static unsafe Vector AddAcross(Vector value); + + + /// AddPairwise : Add pairwise + + /// svfloat16_t svaddp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FADDP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FADDP Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svaddp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FADDP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FADDP Zresult.H, Pg/M, Zresult.H, Zop2.H" + public static unsafe Vector AddPairwise(Vector left, Vector right); + + + /// AddRotateComplex : Complex add with rotate + + /// svfloat16_t svcadd[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, uint64_t imm_rotation) : "FCADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H, #imm_rotation" or "MOVPRFX Zresult, Zop1; FCADD Zresult.H, Pg/M, Zresult.H, Zop2.H, #imm_rotation" + /// svfloat16_t svcadd[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, uint64_t imm_rotation) : "FCADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H, #imm_rotation" or "MOVPRFX Zresult, Zop1; FCADD Zresult.H, Pg/M, Zresult.H, Zop2.H, #imm_rotation" + /// svfloat16_t svcadd[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, uint64_t imm_rotation) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FCADD Zresult.H, Pg/M, Zresult.H, Zop2.H, #imm_rotation" + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); + + + /// AddSequentialAcross : Add reduction (strictly-ordered) + + /// float16_t svadda[_f16](svbool_t pg, float16_t initial, svfloat16_t op) : "FADDA Htied, Pg, Htied, Zop.H" + public static unsafe Vector AddSequentialAcross(Vector initial, Vector value); + + + /// CompareEqual : Compare equal to + + /// svbool_t svcmpeq[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FCMEQ Presult.H, Pg/Z, Zop1.H, Zop2.H" + public static unsafe Vector CompareEqual(Vector left, Vector right); + + + /// CompareGreaterThan : Compare greater than + + /// svbool_t svcmpgt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FCMGT Presult.H, Pg/Z, Zop1.H, Zop2.H" + public static unsafe Vector CompareGreaterThan(Vector left, Vector right); + + + /// CompareGreaterThanOrEqual : Compare greater than or equal to + + /// svbool_t svcmpge[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FCMGE Presult.H, Pg/Z, Zop1.H, Zop2.H" + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right); + + + /// CompareLessThan : Compare less than + + /// svbool_t svcmplt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FCMGT Presult.H, Pg/Z, 
Zop2.H, Zop1.H" + public static unsafe Vector CompareLessThan(Vector left, Vector right); + + + /// CompareLessThanOrEqual : Compare less than or equal to + + /// svbool_t svcmple[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FCMGE Presult.H, Pg/Z, Zop2.H, Zop1.H" + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right); + + + /// CompareNotEqualTo : Compare not equal to + + /// svbool_t svcmpne[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FCMNE Presult.H, Pg/Z, Zop1.H, Zop2.H" + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right); + + + /// CompareUnordered : Compare unordered with + + /// svbool_t svcmpuo[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FCMUO Presult.H, Pg/Z, Zop1.H, Zop2.H" + public static unsafe Vector CompareUnordered(Vector left, Vector right); + + + /// ConcatenateEvenInt128FromTwoInputs : Concatenate even quadwords from two inputs + + /// svfloat16_t svuzp1q[_f16](svfloat16_t op1, svfloat16_t op2) : "UZP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right); + + + /// ConcatenateOddInt128FromTwoInputs : Concatenate odd quadwords from two inputs + + /// svfloat16_t svuzp2q[_f16](svfloat16_t op1, svfloat16_t op2) : "UZP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right); + + + /// ConditionalExtractAfterLastActiveElement : Conditionally extract element after last + + /// svfloat16_t svclasta[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) : "CLASTA Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H" + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data); + + /// svfloat16_t svclasta[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) : "CLASTA Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H" + /// float16_t svclasta[_n_f16](svbool_t pg, float16_t fallback, svfloat16_t data) : "CLASTA Wtied, Pg, Wtied, Zdata.H" or "CLASTA Htied, Pg, Htied, Zdata.H" + public static unsafe half ConditionalExtractAfterLastActiveElement(Vector mask, half defaultValues, Vector data); + + + /// ConditionalExtractAfterLastActiveElementAndReplicate : Conditionally extract element after last + + /// svfloat16_t svclasta[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) : "CLASTA Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H" + public static unsafe Vector ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, Vector defaultScalar, Vector data); + + + /// ConditionalExtractLastActiveElement : Conditionally extract last element + + /// svfloat16_t svclastb[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) : "CLASTB Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H" + public static unsafe Vector ConditionalExtractLastActiveElement(Vector mask, Vector defaultValue, Vector data); + + /// svfloat16_t svclastb[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) : "CLASTB Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H" + /// float16_t svclastb[_n_f16](svbool_t pg, float16_t fallback, svfloat16_t data) : "CLASTB Wtied, Pg, Wtied, Zdata.H" or "CLASTB Htied, Pg, Htied, Zdata.H" + public static unsafe half 
ConditionalExtractLastActiveElement(Vector mask, half defaultValues, Vector data); + + + /// ConditionalExtractLastActiveElementAndReplicate : Conditionally extract last element + + /// svfloat16_t svclastb[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) : "CLASTB Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H" + public static unsafe Vector ConditionalExtractLastActiveElementAndReplicate(Vector mask, Vector fallback, Vector data); + + + /// ConditionalSelect : Conditionally select elements + + /// svfloat16_t svsel[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "SEL Zresult.H, Pg, Zop1.H, Zop2.H" + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right); + + + /// ConvertToDouble : Floating-point convert + + /// svfloat64_t svcvt_f64[_f16]_m(svfloat64_t inactive, svbool_t pg, svfloat16_t op) : "FCVT Ztied.D, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FCVT Zresult.D, Pg/M, Zop.H" + /// svfloat64_t svcvt_f64[_f16]_x(svbool_t pg, svfloat16_t op) : "FCVT Ztied.D, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FCVT Zresult.D, Pg/M, Zop.H" + /// svfloat64_t svcvt_f64[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVT Zresult.D, Pg/M, Zop.H" + public static unsafe Vector ConvertToDouble(Vector value); + + + /// ConvertToHalf : Floating-point convert + + /// svfloat16_t svcvt_f16[_f32]_m(svfloat16_t inactive, svbool_t pg, svfloat32_t op) : "FCVT Ztied.H, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FCVT Zresult.H, Pg/M, Zop.S" + /// svfloat16_t svcvt_f16[_f32]_x(svbool_t pg, svfloat32_t op) : "FCVT Ztied.H, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FCVT Zresult.H, Pg/M, Zop.S" + /// svfloat16_t svcvt_f16[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FCVT Zresult.H, Pg/M, Zop.S" + public static unsafe Vector ConvertToHalf(Vector value); + + /// svfloat16_t svcvt_f16[_f64]_m(svfloat16_t inactive, svbool_t pg, svfloat64_t op) : "FCVT Ztied.H, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FCVT Zresult.H, Pg/M, Zop.D" + /// svfloat16_t svcvt_f16[_f64]_x(svbool_t pg, svfloat64_t op) : "FCVT Ztied.H, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FCVT Zresult.H, Pg/M, Zop.D" + /// svfloat16_t svcvt_f16[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVT Zresult.H, Pg/M, Zop.D" + public static unsafe Vector ConvertToHalf(Vector value); + + /// svfloat16_t svcvt_f16[_s16]_m(svfloat16_t inactive, svbool_t pg, svint16_t op) : "SCVTF Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; SCVTF Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svcvt_f16[_s16]_x(svbool_t pg, svint16_t op) : "SCVTF Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; SCVTF Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svcvt_f16[_s16]_z(svbool_t pg, svint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; SCVTF Zresult.H, Pg/M, Zop.H" + public static unsafe Vector ConvertToHalf(Vector value); + + /// svfloat16_t svcvt_f16[_s32]_m(svfloat16_t inactive, svbool_t pg, svint32_t op) : "SCVTF Ztied.H, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; SCVTF Zresult.H, Pg/M, Zop.S" + /// svfloat16_t svcvt_f16[_s32]_x(svbool_t pg, svint32_t op) : "SCVTF Ztied.H, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; SCVTF Zresult.H, Pg/M, Zop.S" + /// svfloat16_t svcvt_f16[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; SCVTF Zresult.H, Pg/M, Zop.S" + public static unsafe Vector ConvertToHalf(Vector value); + + /// svfloat16_t svcvt_f16[_s64]_m(svfloat16_t inactive, svbool_t pg, svint64_t 
op) : "SCVTF Ztied.H, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; SCVTF Zresult.H, Pg/M, Zop.D" + /// svfloat16_t svcvt_f16[_s64]_x(svbool_t pg, svint64_t op) : "SCVTF Ztied.H, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; SCVTF Zresult.H, Pg/M, Zop.D" + /// svfloat16_t svcvt_f16[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; SCVTF Zresult.H, Pg/M, Zop.D" + public static unsafe Vector ConvertToHalf(Vector value); + + /// svfloat16_t svcvt_f16[_u16]_m(svfloat16_t inactive, svbool_t pg, svuint16_t op) : "UCVTF Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; UCVTF Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svcvt_f16[_u16]_x(svbool_t pg, svuint16_t op) : "UCVTF Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; UCVTF Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svcvt_f16[_u16]_z(svbool_t pg, svuint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; UCVTF Zresult.H, Pg/M, Zop.H" + public static unsafe Vector ConvertToHalf(Vector value); + + /// svfloat16_t svcvt_f16[_u32]_m(svfloat16_t inactive, svbool_t pg, svuint32_t op) : "UCVTF Ztied.H, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; UCVTF Zresult.H, Pg/M, Zop.S" + /// svfloat16_t svcvt_f16[_u32]_x(svbool_t pg, svuint32_t op) : "UCVTF Ztied.H, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; UCVTF Zresult.H, Pg/M, Zop.S" + /// svfloat16_t svcvt_f16[_u32]_z(svbool_t pg, svuint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; UCVTF Zresult.H, Pg/M, Zop.S" + public static unsafe Vector ConvertToHalf(Vector value); + + /// svfloat16_t svcvt_f16[_u64]_m(svfloat16_t inactive, svbool_t pg, svuint64_t op) : "UCVTF Ztied.H, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; UCVTF Zresult.H, Pg/M, Zop.D" + /// svfloat16_t svcvt_f16[_u64]_x(svbool_t pg, svuint64_t op) : "UCVTF Ztied.H, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; UCVTF Zresult.H, Pg/M, Zop.D" + /// svfloat16_t svcvt_f16[_u64]_z(svbool_t pg, svuint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; UCVTF Zresult.H, Pg/M, Zop.D" + public static unsafe Vector ConvertToHalf(Vector value); + + + /// ConvertToInt16 : Floating-point convert + + /// svint16_t svcvt_s16[_f16]_m(svint16_t inactive, svbool_t pg, svfloat16_t op) : "FCVTZS Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FCVTZS Zresult.H, Pg/M, Zop.H" + /// svint16_t svcvt_s16[_f16]_x(svbool_t pg, svfloat16_t op) : "FCVTZS Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FCVTZS Zresult.H, Pg/M, Zop.H" + /// svint16_t svcvt_s16[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; FCVTZS Zresult.H, Pg/M, Zop.H" + public static unsafe Vector ConvertToInt16(Vector value); + + + /// ConvertToInt32 : Floating-point convert + + /// svint32_t svcvt_s32[_f16]_m(svint32_t inactive, svbool_t pg, svfloat16_t op) : "FCVTZS Ztied.S, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FCVTZS Zresult.S, Pg/M, Zop.H" + /// svint32_t svcvt_s32[_f16]_x(svbool_t pg, svfloat16_t op) : "FCVTZS Ztied.S, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FCVTZS Zresult.S, Pg/M, Zop.H" + /// svint32_t svcvt_s32[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FCVTZS Zresult.S, Pg/M, Zop.H" + public static unsafe Vector ConvertToInt32(Vector value); + + + /// ConvertToInt64 : Floating-point convert + + /// svint64_t svcvt_s64[_f16]_m(svint64_t inactive, svbool_t pg, svfloat16_t op) : "FCVTZS Ztied.D, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FCVTZS Zresult.D, Pg/M, Zop.H" + /// svint64_t svcvt_s64[_f16]_x(svbool_t pg, svfloat16_t op) : "FCVTZS Ztied.D, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FCVTZS Zresult.D, Pg/M, Zop.H" + /// svint64_t 
svcvt_s64[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZS Zresult.D, Pg/M, Zop.H" + public static unsafe Vector ConvertToInt64(Vector value); + + + /// ConvertToSingle : Floating-point convert + + /// svfloat32_t svcvt_f32[_f16]_m(svfloat32_t inactive, svbool_t pg, svfloat16_t op) : "FCVT Ztied.S, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FCVT Zresult.S, Pg/M, Zop.H" + /// svfloat32_t svcvt_f32[_f16]_x(svbool_t pg, svfloat16_t op) : "FCVT Ztied.S, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FCVT Zresult.S, Pg/M, Zop.H" + /// svfloat32_t svcvt_f32[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FCVT Zresult.S, Pg/M, Zop.H" + public static unsafe Vector ConvertToSingle(Vector value); + + + /// ConvertToUInt16 : Floating-point convert + + /// svuint16_t svcvt_u16[_f16]_m(svuint16_t inactive, svbool_t pg, svfloat16_t op) : "FCVTZU Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FCVTZU Zresult.H, Pg/M, Zop.H" + /// svuint16_t svcvt_u16[_f16]_x(svbool_t pg, svfloat16_t op) : "FCVTZU Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FCVTZU Zresult.H, Pg/M, Zop.H" + /// svuint16_t svcvt_u16[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; FCVTZU Zresult.H, Pg/M, Zop.H" + public static unsafe Vector ConvertToUInt16(Vector value); + + + /// ConvertToUInt32 : Floating-point convert + + /// svuint32_t svcvt_u32[_f16]_m(svuint32_t inactive, svbool_t pg, svfloat16_t op) : "FCVTZU Ztied.S, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FCVTZU Zresult.S, Pg/M, Zop.H" + /// svuint32_t svcvt_u32[_f16]_x(svbool_t pg, svfloat16_t op) : "FCVTZU Ztied.S, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FCVTZU Zresult.S, Pg/M, Zop.H" + /// svuint32_t svcvt_u32[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FCVTZU Zresult.S, Pg/M, Zop.H" + public static unsafe Vector ConvertToUInt32(Vector value); + + + /// ConvertToUInt64 : Floating-point convert + + /// svuint64_t svcvt_u64[_f16]_m(svuint64_t inactive, svbool_t pg, svfloat16_t op) : "FCVTZU Ztied.D, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FCVTZU Zresult.D, Pg/M, Zop.H" + /// svuint64_t svcvt_u64[_f16]_x(svbool_t pg, svfloat16_t op) : "FCVTZU Ztied.D, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FCVTZU Zresult.D, Pg/M, Zop.H" + /// svuint64_t svcvt_u64[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZU Zresult.D, Pg/M, Zop.H" + public static unsafe Vector ConvertToUInt64(Vector value); + + + /// CreateFalseMaskHalf : Set all predicate elements to false + + /// svbool_t svpfalse[_b]() : "PFALSE Presult.B" + public static unsafe Vector CreateFalseMaskHalf(); + + + /// CreateTrueMaskHalf : Set predicate elements to true + + /// svbool_t svptrue_pat_b8(enum svpattern pattern) : "PTRUE Presult.B, pattern" + public static unsafe Vector CreateTrueMaskHalf([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + + /// CreateWhileReadAfterWriteMask : While free of read-after-write conflicts + + /// svbool_t svwhilerw[_f16](const float16_t *op1, const float16_t *op2) : "WHILERW Presult.H, Xop1, Xop2" + public static unsafe Vector CreateWhileReadAfterWriteMask(half* left, half* right); + + + /// CreateWhileWriteAfterReadMask : While free of write-after-read conflicts + + /// svbool_t svwhilewr[_f16](const float16_t *op1, const float16_t *op2) : "WHILEWR Presult.H, Xop1, Xop2" + public static unsafe Vector CreateWhileWriteAfterReadMask(half* left, half* right); + + + /// Divide : Divide + + /// svfloat16_t svdiv[_f16]_m(svbool_t pg, 
svfloat16_t op1, svfloat16_t op2) : "FDIV Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FDIV Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svdiv[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FDIV Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "FDIVR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; FDIV Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svdiv[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FDIV Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; FDIVR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Divide(Vector left, Vector right); + + + /// DownConvertNarrowingUpper : Down convert and narrow (top) + + /// svfloat16_t svcvtnt_f16[_f32]_m(svfloat16_t even, svbool_t pg, svfloat32_t op) : "FCVTNT Ztied.H, Pg/M, Zop.S" + /// svfloat16_t svcvtnt_f16[_f32]_x(svfloat16_t even, svbool_t pg, svfloat32_t op) : "FCVTNT Ztied.H, Pg/M, Zop.S" + public static unsafe Vector DownConvertNarrowingUpper(Vector value); + + + /// DuplicateSelectedScalarToVector : Broadcast a scalar value + + /// svfloat16_t svdup_lane[_f16](svfloat16_t data, uint16_t index) : "DUP Zresult.H, Zdata.H[index]" or "TBL Zresult.H, Zdata.H, Zindex.H" + /// svfloat16_t svdupq_lane[_f16](svfloat16_t data, uint64_t index) : "DUP Zresult.Q, Zdata.Q[index]" or "TBL Zresult.D, Zdata.D, Zindices_d.D" + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); + + + /// ExtractAfterLastScalar : Extract element after last + + /// float16_t svlasta[_f16](svbool_t pg, svfloat16_t op) : "LASTA Wresult, Pg, Zop.H" or "LASTA Hresult, Pg, Zop.H" + public static unsafe half ExtractAfterLastScalar(Vector value); + + + /// ExtractAfterLastVector : Extract element after last + + /// float16_t svlasta[_f16](svbool_t pg, svfloat16_t op) : "LASTA Wresult, Pg, Zop.H" or "LASTA Hresult, Pg, Zop.H" + public static unsafe Vector ExtractAfterLastVector(Vector value); + + + /// ExtractLastScalar : Extract last element + + /// float16_t svlastb[_f16](svbool_t pg, svfloat16_t op) : "LASTB Wresult, Pg, Zop.H" or "LASTB Hresult, Pg, Zop.H" + public static unsafe half ExtractLastScalar(Vector value); + + + /// ExtractLastVector : Extract last element + + /// float16_t svlastb[_f16](svbool_t pg, svfloat16_t op) : "LASTB Wresult, Pg, Zop.H" or "LASTB Hresult, Pg, Zop.H" + public static unsafe Vector ExtractLastVector(Vector value); + + + /// ExtractVector : Extract vector from pair of vectors + + /// svfloat16_t svext[_f16](svfloat16_t op1, svfloat16_t op2, uint64_t imm3) : "EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 2" or "MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 2" + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index); + + + /// FloatingPointExponentialAccelerator : Floating-point exponential accelerator + + /// svfloat16_t svexpa[_f16](svuint16_t op) : "FEXPA Zresult.H, Zop.H" + public static unsafe Vector FloatingPointExponentialAccelerator(Vector value); + + + /// FusedMultiplyAdd : Multiply-add, addend first + + /// svfloat16_t svmla[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) : "FMLA Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; FMLA Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svfloat16_t svmla[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) : "FMLA Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "FMAD Ztied2.H, Pg/M, Zop3.H, Zop1.H" or "FMAD Ztied3.H, Pg/M, 
Zop2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; FMLA Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svfloat16_t svmla[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMLA Zresult.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMAD Zresult.H, Pg/M, Zop3.H, Zop1.H" or "MOVPRFX Zresult.H, Pg/Z, Zop3.H; FMAD Zresult.H, Pg/M, Zop2.H, Zop1.H" + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right); + + + /// FusedMultiplyAddBySelectedScalar : Multiply-add, addend first + + /// svfloat16_t svmla_lane[_f16](svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) : "FMLA Ztied1.H, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; FMLA Zresult.H, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + + /// FusedMultiplyAddNegated : Negated multiply-add, addend first + + /// svfloat16_t svnmla[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) : "FNMLA Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; FNMLA Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svfloat16_t svnmla[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) : "FNMLA Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "FNMAD Ztied2.H, Pg/M, Zop3.H, Zop1.H" or "FNMAD Ztied3.H, Pg/M, Zop2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; FNMLA Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svfloat16_t svnmla[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FNMLA Zresult.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; FNMAD Zresult.H, Pg/M, Zop3.H, Zop1.H" or "MOVPRFX Zresult.H, Pg/Z, Zop3.H; FNMAD Zresult.H, Pg/M, Zop2.H, Zop1.H" + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right); + + + /// FusedMultiplySubtract : Multiply-subtract, minuend first + + /// svfloat16_t svmls[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) : "FMLS Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; FMLS Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svfloat16_t svmls[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) : "FMLS Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "FMSB Ztied2.H, Pg/M, Zop3.H, Zop1.H" or "FMSB Ztied3.H, Pg/M, Zop2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; FMLS Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svfloat16_t svmls[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMLS Zresult.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMSB Zresult.H, Pg/M, Zop3.H, Zop1.H" or "MOVPRFX Zresult.H, Pg/Z, Zop3.H; FMSB Zresult.H, Pg/M, Zop2.H, Zop1.H" + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right); + + + /// FusedMultiplySubtractBySelectedScalar : Multiply-subtract, minuend first + + /// svfloat16_t svmls_lane[_f16](svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) : "FMLS Ztied1.H, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; FMLS Zresult.H, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + + /// FusedMultiplySubtractNegated : Negated multiply-subtract, minuend first + + /// svfloat16_t svnmls[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) : "FNMLS Ztied1.H, Pg/M, Zop2.H, 
Zop3.H" or "MOVPRFX Zresult, Zop1; FNMLS Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svfloat16_t svnmls[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) : "FNMLS Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "FNMSB Ztied2.H, Pg/M, Zop3.H, Zop1.H" or "FNMSB Ztied3.H, Pg/M, Zop2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; FNMLS Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svfloat16_t svnmls[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FNMLS Zresult.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; FNMSB Zresult.H, Pg/M, Zop3.H, Zop1.H" or "MOVPRFX Zresult.H, Pg/Z, Zop3.H; FNMSB Zresult.H, Pg/M, Zop2.H, Zop1.H" + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right); + + + /// GetActiveElementCount : Count set predicate bits + + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) : "CNTP Xresult, Pg, Pop.B" + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); + + + /// InsertIntoShiftedVector : Insert scalar into shifted vector + + /// svfloat16_t svinsr[_n_f16](svfloat16_t op1, float16_t op2) : "INSR Ztied1.H, Wop2" or "INSR Ztied1.H, Hop2" + public static unsafe Vector InsertIntoShiftedVector(Vector left, half right); + + + /// InterleaveEvenInt128FromTwoInputs : Interleave even quadwords from two inputs + + /// svfloat16_t svtrn1q[_f16](svfloat16_t op1, svfloat16_t op2) : "TRN1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right); + + + /// InterleaveInt128FromHighHalvesOfTwoInputs : Interleave quadwords from high halves of two inputs + + /// svfloat16_t svzip2q[_f16](svfloat16_t op1, svfloat16_t op2) : "ZIP2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right); + + + /// InterleaveInt128FromLowHalvesOfTwoInputs : Interleave quadwords from low halves of two inputs + + /// svfloat16_t svzip1q[_f16](svfloat16_t op1, svfloat16_t op2) : "ZIP1 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right); + + + /// InterleaveOddInt128FromTwoInputs : Interleave odd quadwords from two inputs + + /// svfloat16_t svtrn2q[_f16](svfloat16_t op1, svfloat16_t op2) : "TRN2 Zresult.Q, Zop1.Q, Zop2.Q" + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right); + + + /// LoadVector : Unextended load + + /// svfloat16_t svld1[_f16](svbool_t pg, const float16_t *base) : "LD1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVector(Vector mask, half* address); + + + /// LoadVector128AndReplicateToVector : Load and replicate 128 bits of data + + /// svfloat16_t svld1rq[_f16](svbool_t pg, const float16_t *base) : "LD1RQH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1RQH Zresult.H, Pg/Z, [Xarray, #index * 2]" or "LD1RQH Zresult.H, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, half* address); + + + /// LoadVector256AndReplicateToVector : Load and replicate 256 bits of data + + /// svfloat16_t svld1ro[_f16](svbool_t pg, const float16_t *base) : "LD1ROH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1ROH Zresult.H, Pg/Z, [Xarray, #index * 2]" or "LD1ROH Zresult.H, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, half* address); + + + /// LoadVectorFirstFaulting : Unextended 
+
+  /// LoadVectorFirstFaulting : Unextended load, first-faulting
+
+  /// svfloat16_t svldff1[_f16](svbool_t pg, const float16_t *base) : "LDFF1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1H Zresult.H, Pg/Z, [Xbase, XZR, LSL #1]"
+  public static unsafe Vector LoadVectorFirstFaulting(Vector mask, half* address);
+
+
+  /// LoadVectorNonFaulting : Unextended load, non-faulting
+
+  /// svfloat16_t svldnf1[_f16](svbool_t pg, const float16_t *base) : "LDNF1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]"
+  public static unsafe Vector LoadVectorNonFaulting(half* address);
+
+
+  /// LoadVectorNonTemporal : Unextended load, non-temporal
+
+  /// svfloat16_t svldnt1[_f16](svbool_t pg, const float16_t *base) : "LDNT1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDNT1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]"
+  public static unsafe Vector LoadVectorNonTemporal(Vector mask, half* address);
+
+
+  /// LoadVectorx2 : Load two-element tuples into two vectors
+
+  /// svfloat16x2_t svld2[_f16](svbool_t pg, const float16_t *base) : "LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xbase, #0, MUL VL]"
+  public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, half* address);
+
+
+  /// LoadVectorx3 : Load three-element tuples into three vectors
+
+  /// svfloat16x3_t svld3[_f16](svbool_t pg, const float16_t *base) : "LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xbase, #0, MUL VL]"
+  public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, half* address);
+
+
+  /// LoadVectorx4 : Load four-element tuples into four vectors
+
+  /// svfloat16x4_t svld4[_f16](svbool_t pg, const float16_t *base) : "LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xbase, #0, MUL VL]"
+  public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, half* address);
+
+
+  /// Log2 : Base 2 logarithm as integer
+
+  /// svint16_t svlogb[_f16]_m(svint16_t inactive, svbool_t pg, svfloat16_t op) : "FLOGB Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FLOGB Zresult.H, Pg/M, Zop.H"
+  /// svint16_t svlogb[_f16]_x(svbool_t pg, svfloat16_t op) : "FLOGB Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FLOGB Zresult.H, Pg/M, Zop.H"
+  /// svint16_t svlogb[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; FLOGB Zresult.H, Pg/M, Zop.H"
+  public static unsafe Vector Log2(Vector value);
+
+
+  /// Max : Maximum
+
+  /// svfloat16_t svmax[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMAX Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FMAX Zresult.H, Pg/M, Zresult.H, Zop2.H"
+  /// svfloat16_t svmax[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMAX Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "FMAX Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; FMAX Zresult.H, Pg/M, Zresult.H, Zop2.H"
+  /// svfloat16_t svmax[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMAX Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMAX Zresult.H, Pg/M, Zresult.H, Zop1.H"
+  public static unsafe Vector Max(Vector left, Vector right);
+
+
+  /// MaxAcross : Maximum reduction to scalar
+
+  /// float16_t svmaxv[_f16](svbool_t pg, svfloat16_t op) : "FMAXV Hresult, Pg, Zop.H"
+  public static unsafe Vector MaxAcross(Vector value);
+
+
+  /// MaxNumber : Maximum number
+
+  /// svfloat16_t svmaxnm[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMAXNM Ztied1.H, Pg/M, Ztied1.H,
Zop2.H" or "MOVPRFX Zresult, Zop1; FMAXNM Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svmaxnm[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMAXNM Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "FMAXNM Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; FMAXNM Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svmaxnm[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMAXNM Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMAXNM Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector MaxNumber(Vector left, Vector right); + + + /// MaxNumberAcross : Maximum number reduction to scalar + + /// float16_t svmaxnmv[_f16](svbool_t pg, svfloat16_t op) : "FMAXNMV Hresult, Pg, Zop.H" + public static unsafe Vector MaxNumberAcross(Vector value); + + + /// MaxNumberPairwise : Maximum number pairwise + + /// svfloat16_t svmaxnmp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMAXNMP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FMAXNMP Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svmaxnmp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMAXNMP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FMAXNMP Zresult.H, Pg/M, Zresult.H, Zop2.H" + public static unsafe Vector MaxNumberPairwise(Vector left, Vector right); + + + /// MaxPairwise : Maximum pairwise + + /// svfloat16_t svmaxp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMAXP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FMAXP Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svmaxp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMAXP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FMAXP Zresult.H, Pg/M, Zresult.H, Zop2.H" + public static unsafe Vector MaxPairwise(Vector left, Vector right); + + + /// Min : Minimum + + /// svfloat16_t svmin[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMIN Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FMIN Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svmin[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMIN Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "FMIN Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; FMIN Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svmin[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMIN Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMIN Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Min(Vector left, Vector right); + + + /// MinAcross : Minimum reduction to scalar + + /// float16_t svminv[_f16](svbool_t pg, svfloat16_t op) : "FMINV Hresult, Pg, Zop.H" + public static unsafe Vector MinAcross(Vector value); + + + /// MinNumber : Minimum number + + /// svfloat16_t svminnm[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMINNM Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FMINNM Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svminnm[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMINNM Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "FMINNM Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; FMINNM Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svminnm[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMINNM Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMINNM Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector 
MinNumber(Vector left, Vector right); + + + /// MinNumberAcross : Minimum number reduction to scalar + + /// float16_t svminnmv[_f16](svbool_t pg, svfloat16_t op) : "FMINNMV Hresult, Pg, Zop.H" + public static unsafe Vector MinNumberAcross(Vector value); + + + /// MinNumberPairwise : Minimum number pairwise + + /// svfloat16_t svminnmp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMINNMP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FMINNMP Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svminnmp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMINNMP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FMINNMP Zresult.H, Pg/M, Zresult.H, Zop2.H" + public static unsafe Vector MinNumberPairwise(Vector left, Vector right); + + + /// MinPairwise : Minimum pairwise + + /// svfloat16_t svminp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMINP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FMINP Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svminp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMINP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FMINP Zresult.H, Pg/M, Zresult.H, Zop2.H" + public static unsafe Vector MinPairwise(Vector left, Vector right); + + + /// Multiply : Multiply + + /// svfloat16_t svmul[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMUL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FMUL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svmul[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMUL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "FMUL Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "FMUL Zresult.H, Zop1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FMUL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svmul[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMUL Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMUL Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Multiply(Vector left, Vector right); + + + /// MultiplyAddRotateComplex : Complex multiply-add with rotate + + /// svfloat16_t svcmla[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_rotation) : "FCMLA Ztied1.H, Pg/M, Zop2.H, Zop3.H, #imm_rotation" or "MOVPRFX Zresult, Zop1; FCMLA Zresult.H, Pg/M, Zop2.H, Zop3.H, #imm_rotation" + /// svfloat16_t svcmla[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_rotation) : "FCMLA Ztied1.H, Pg/M, Zop2.H, Zop3.H, #imm_rotation" or "MOVPRFX Zresult, Zop1; FCMLA Zresult.H, Pg/M, Zop2.H, Zop3.H, #imm_rotation" + /// svfloat16_t svcmla[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_rotation) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FCMLA Zresult.H, Pg/M, Zop2.H, Zop3.H, #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); + + + /// MultiplyAddRotateComplexBySelectedScalar : Complex multiply-add with rotate + + /// svfloat16_t svcmla_lane[_f16](svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index, uint64_t imm_rotation) : "FCMLA Ztied1.H, Zop2.H, Zop3.H[imm_index], #imm_rotation" or "MOVPRFX Zresult, Zop1; FCMLA Zresult.H, Zop2.H, Zop3.H[imm_index], #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation); + + + 
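+
+  // Illustrative usage sketch (not part of the proposed surface): a full
+  // complex multiply-accumulate is conventionally issued as two FCMLA
+  // operations. The mapping of the rotation byte to 0/90/180/270 degrees is
+  // an assumption here, not something this listing pins down.
+  //
+  //   Vector<half> acc = addend;
+  //   acc = SveFp16.MultiplyAddRotateComplex(acc, left, right, 0); // 0 degrees
+  //   acc = SveFp16.MultiplyAddRotateComplex(acc, left, right, 1); // 90 degrees
+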
/// MultiplyAddWideningLower : Multiply-add long (bottom) + + /// svfloat32_t svmlalb[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) : "FMLALB Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; FMLALB Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3); + + /// svfloat32_t svmlalb_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) : "FMLALB Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; FMLALB Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// MultiplyAddWideningUpper : Multiply-add long (top) + + /// svfloat32_t svmlalt[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) : "FMLALT Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; FMLALT Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svfloat32_t svmlalt_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) : "FMLALT Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; FMLALT Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// MultiplyBySelectedScalar : Multiply + + /// svfloat16_t svmul_lane[_f16](svfloat16_t op1, svfloat16_t op2, uint64_t imm_index) : "FMUL Zresult.H, Zop1.H, Zop2.H[imm_index]" + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); + + + /// MultiplyExtended : Multiply extended (∞×0=2) + + /// svfloat16_t svmulx[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMULX Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FMULX Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svmulx[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FMULX Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "FMULX Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; FMULX Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svmulx[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMULX Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMULX Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector MultiplyExtended(Vector left, Vector right); + + + /// MultiplySubtractWideningLower : Multiply-subtract long (bottom) + + /// svfloat32_t svmlslb[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) : "FMLSLB Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; FMLSLB Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3); + + /// svfloat32_t svmlslb_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) : "FMLSLB Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; FMLSLB Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// MultiplySubtractWideningUpper : Multiply-subtract long (top) + + /// svfloat32_t svmlslt[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) : "FMLSLT Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; FMLSLT Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svfloat32_t svmlslt_lane[_f32](svfloat32_t op1, 
svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) : "FMLSLT Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; FMLSLT Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// Negate : Negate + + /// svfloat16_t svneg[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) : "FNEG Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FNEG Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svneg[_f16]_x(svbool_t pg, svfloat16_t op) : "FNEG Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FNEG Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svneg[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; FNEG Zresult.H, Pg/M, Zop.H" + public static unsafe Vector Negate(Vector value); + + + /// PopCount : Count nonzero bits + + /// svuint16_t svcnt[_f16]_m(svuint16_t inactive, svbool_t pg, svfloat16_t op) : "CNT Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; CNT Zresult.H, Pg/M, Zop.H" + /// svuint16_t svcnt[_f16]_x(svbool_t pg, svfloat16_t op) : "CNT Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; CNT Zresult.H, Pg/M, Zop.H" + /// svuint16_t svcnt[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; CNT Zresult.H, Pg/M, Zop.H" + public static unsafe Vector PopCount(Vector value); + + + /// ReciprocalEstimate : Reciprocal estimate + + /// svfloat16_t svrecpe[_f16](svfloat16_t op) : "FRECPE Zresult.H, Zop.H" + public static unsafe Vector ReciprocalEstimate(Vector value); + + + /// ReciprocalExponent : Reciprocal exponent + + /// svfloat16_t svrecpx[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) : "FRECPX Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FRECPX Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svrecpx[_f16]_x(svbool_t pg, svfloat16_t op) : "FRECPX Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FRECPX Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svrecpx[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; FRECPX Zresult.H, Pg/M, Zop.H" + public static unsafe Vector ReciprocalExponent(Vector value); + + + /// ReciprocalSqrtEstimate : Reciprocal square root estimate + + /// svfloat16_t svrsqrte[_f16](svfloat16_t op) : "FRSQRTE Zresult.H, Zop.H" + public static unsafe Vector ReciprocalSqrtEstimate(Vector value); + + + /// ReciprocalSqrtStep : Reciprocal square root step + + /// svfloat16_t svrsqrts[_f16](svfloat16_t op1, svfloat16_t op2) : "FRSQRTS Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector ReciprocalSqrtStep(Vector left, Vector right); + + + /// ReciprocalStep : Reciprocal step + + /// svfloat16_t svrecps[_f16](svfloat16_t op1, svfloat16_t op2) : "FRECPS Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector ReciprocalStep(Vector left, Vector right); + + + /// ReverseElement : Reverse all elements + + /// svfloat16_t svrev[_f16](svfloat16_t op) : "REV Zresult.H, Zop.H" + public static unsafe Vector ReverseElement(Vector value); + + + /// RoundAwayFromZero : Round to nearest, ties away from zero + + /// svfloat16_t svrinta[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) : "FRINTA Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FRINTA Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svrinta[_f16]_x(svbool_t pg, svfloat16_t op) : "FRINTA Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FRINTA Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svrinta[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; FRINTA Zresult.H, Pg/M, Zop.H" + public static unsafe Vector 
RoundAwayFromZero(Vector value); + + + /// RoundToNearest : Round to nearest, ties to even + + /// svfloat16_t svrintn[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) : "FRINTN Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FRINTN Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svrintn[_f16]_x(svbool_t pg, svfloat16_t op) : "FRINTN Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FRINTN Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svrintn[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; FRINTN Zresult.H, Pg/M, Zop.H" + public static unsafe Vector RoundToNearest(Vector value); + + + /// RoundToNegativeInfinity : Round towards -∞ + + /// svfloat16_t svrintm[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) : "FRINTM Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FRINTM Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svrintm[_f16]_x(svbool_t pg, svfloat16_t op) : "FRINTM Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FRINTM Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svrintm[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; FRINTM Zresult.H, Pg/M, Zop.H" + public static unsafe Vector RoundToNegativeInfinity(Vector value); + + + /// RoundToPositiveInfinity : Round towards +∞ + + /// svfloat16_t svrintp[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) : "FRINTP Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FRINTP Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svrintp[_f16]_x(svbool_t pg, svfloat16_t op) : "FRINTP Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FRINTP Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svrintp[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; FRINTP Zresult.H, Pg/M, Zop.H" + public static unsafe Vector RoundToPositiveInfinity(Vector value); + + + /// RoundToZero : Round towards zero + + /// svfloat16_t svrintz[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) : "FRINTZ Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FRINTZ Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svrintz[_f16]_x(svbool_t pg, svfloat16_t op) : "FRINTZ Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FRINTZ Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svrintz[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; FRINTZ Zresult.H, Pg/M, Zop.H" + public static unsafe Vector RoundToZero(Vector value); + + + /// Scale : Adjust exponent + + /// svfloat16_t svscale[_f16]_m(svbool_t pg, svfloat16_t op1, svint16_t op2) : "FSCALE Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FSCALE Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svscale[_f16]_x(svbool_t pg, svfloat16_t op1, svint16_t op2) : "FSCALE Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FSCALE Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svscale[_f16]_z(svbool_t pg, svfloat16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FSCALE Zresult.H, Pg/M, Zresult.H, Zop2.H" + public static unsafe Vector Scale(Vector left, Vector right); + + + /// Splice : Splice two vectors under predicate control + + /// svfloat16_t svsplice[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "SPLICE Ztied1.H, Pg, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SPLICE Zresult.H, Pg, Zresult.H, Zop2.H" + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); + + + /// Sqrt : Square root + + /// svfloat16_t svsqrt[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) : "FSQRT Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; FSQRT Zresult.H, Pg/M, Zop.H" + /// 
svfloat16_t svsqrt[_f16]_x(svbool_t pg, svfloat16_t op) : "FSQRT Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; FSQRT Zresult.H, Pg/M, Zop.H" + /// svfloat16_t svsqrt[_f16]_z(svbool_t pg, svfloat16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; FSQRT Zresult.H, Pg/M, Zop.H" + public static unsafe Vector Sqrt(Vector value); + + + /// Store : Non-truncating store + + /// void svst1[_f16](svbool_t pg, float16_t *base, svfloat16_t data) : "ST1H Zdata.H, Pg, [Xarray, Xindex, LSL #1]" or "ST1H Zdata.H, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, half* address, Vector data); + + /// void svst2[_f16](svbool_t pg, float16_t *base, svfloat16x2_t data) : "ST2H {Zdata0.H, Zdata1.H}, Pg, [Xarray, Xindex, LSL #1]" or "ST2H {Zdata0.H, Zdata1.H}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, half* address, (Vector Value1, Vector Value2) data); + + /// void svst3[_f16](svbool_t pg, float16_t *base, svfloat16x3_t data) : "ST3H {Zdata0.H - Zdata2.H}, Pg, [Xarray, Xindex, LSL #1]" or "ST3H {Zdata0.H - Zdata2.H}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, half* address, (Vector Value1, Vector Value2, Vector Value3) data); + + /// void svst4[_f16](svbool_t pg, float16_t *base, svfloat16x4_t data) : "ST4H {Zdata0.H - Zdata3.H}, Pg, [Xarray, Xindex, LSL #1]" or "ST4H {Zdata0.H - Zdata3.H}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, half* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); + + + /// StoreNonTemporal : Non-truncating store, non-temporal + + /// void svstnt1[_f16](svbool_t pg, float16_t *base, svfloat16_t data) : "STNT1H Zdata.H, Pg, [Xarray, Xindex, LSL #1]" or "STNT1H Zdata.H, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNonTemporal(Vector mask, half* address, Vector data); + + + /// Subtract : Subtract + + /// svfloat16_t svsub[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FSUB Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svsub[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "FSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "FSUBR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "FSUB Zresult.H, Zop1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; FSUB Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svfloat16_t svsub[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; FSUB Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; FSUBR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Subtract(Vector left, Vector right); + + + /// TransposeEven : Interleave even elements from two inputs + + /// svfloat16_t svtrn1[_f16](svfloat16_t op1, svfloat16_t op2) : "TRN1 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector TransposeEven(Vector left, Vector right); + + + /// TransposeOdd : Interleave odd elements from two inputs + + /// svfloat16_t svtrn2[_f16](svfloat16_t op1, svfloat16_t op2) : "TRN2 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector TransposeOdd(Vector left, Vector right); + + + /// TrigonometricMultiplyAddCoefficient : Trigonometric multiply-add coefficient + + /// svfloat16_t svtmad[_f16](svfloat16_t op1, svfloat16_t op2, uint64_t imm3) : "FTMAD Ztied1.H, Ztied1.H, Zop2.H, #imm3" or "MOVPRFX Zresult, Zop1; FTMAD Zresult.H, Zresult.H, Zop2.H, #imm3" + public static unsafe Vector TrigonometricMultiplyAddCoefficient(Vector left, Vector right, [ConstantExpected] byte control); + + + /// 
TrigonometricSelectCoefficient : Trigonometric select coefficient + + /// svfloat16_t svtssel[_f16](svfloat16_t op1, svuint16_t op2) : "FTSSEL Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector TrigonometricSelectCoefficient(Vector value, Vector selector); + + + /// TrigonometricStartingValue : Trigonometric starting value + + /// svfloat16_t svtsmul[_f16](svfloat16_t op1, svuint16_t op2) : "FTSMUL Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector TrigonometricStartingValue(Vector value, Vector sign); + + + /// UnzipEven : Concatenate even elements from two inputs + + /// svfloat16_t svuzp1[_f16](svfloat16_t op1, svfloat16_t op2) : "UZP1 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector UnzipEven(Vector left, Vector right); + + + /// UnzipOdd : Concatenate odd elements from two inputs + + /// svfloat16_t svuzp2[_f16](svfloat16_t op1, svfloat16_t op2) : "UZP2 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector UnzipOdd(Vector left, Vector right); + + + /// UpConvertWideningUpper : Up convert long (top) + + /// svfloat32_t svcvtlt_f32[_f16]_m(svfloat32_t inactive, svbool_t pg, svfloat16_t op) : "FCVTLT Ztied.S, Pg/M, Zop.H" + /// svfloat32_t svcvtlt_f32[_f16]_x(svbool_t pg, svfloat16_t op) : "FCVTLT Ztied.S, Pg/M, Ztied.H" + public static unsafe Vector UpConvertWideningUpper(Vector value); + + + /// VectorTableLookup : Table lookup in single-vector table + + /// svfloat16_t svtbl[_f16](svfloat16_t data, svuint16_t indices) : "TBL Zresult.H, Zdata.H, Zindices.H" + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); + + /// svfloat16_t svtbl2[_f16](svfloat16x2_t data, svuint16_t indices) : "TBL Zresult.H, {Zdata0.H, Zdata1.H}, Zindices.H" + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); + + + /// VectorTableLookupExtension : Table lookup in single-vector table (merging) + + /// svfloat16_t svtbx[_f16](svfloat16_t fallback, svfloat16_t data, svuint16_t indices) : "TBX Ztied.H, Zdata.H, Zindices.H" + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); + + + /// ZipHigh : Interleave elements from high halves of two inputs + + /// svfloat16_t svzip2[_f16](svfloat16_t op1, svfloat16_t op2) : "ZIP2 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector ZipHigh(Vector left, Vector right); + + + /// ZipLow : Interleave elements from low halves of two inputs + + /// svfloat16_t svzip1[_f16](svfloat16_t op1, svfloat16_t op2) : "ZIP1 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector ZipLow(Vector left, Vector right); + + + /// total method signatures: 138 + /// total method names: 134 +} + + /// Optional Entries: + /// public static unsafe Vector AbsoluteCompareGreaterThan(Vector left, half right); // svacgt[_n_f16] + /// public static unsafe Vector AbsoluteCompareGreaterThanOrEqual(Vector left, half right); // svacge[_n_f16] + /// public static unsafe Vector AbsoluteCompareLessThan(Vector left, half right); // svaclt[_n_f16] + /// public static unsafe Vector AbsoluteCompareLessThanOrEqual(Vector left, half right); // svacle[_n_f16] + /// public static unsafe Vector AbsoluteDifference(Vector left, half right); // svabd[_n_f16]_m or svabd[_n_f16]_x or svabd[_n_f16]_z + /// public static unsafe Vector Add(Vector left, half right); // svadd[_n_f16]_m or svadd[_n_f16]_x or svadd[_n_f16]_z + /// public static unsafe Vector CompareEqual(Vector left, half right); // svcmpeq[_n_f16] + /// public static unsafe Vector CompareGreaterThan(Vector left, half right); 
// svcmpgt[_n_f16] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, half right); // svcmpge[_n_f16] + /// public static unsafe Vector CompareLessThan(Vector left, half right); // svcmplt[_n_f16] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, half right); // svcmple[_n_f16] + /// public static unsafe Vector CompareNotEqualTo(Vector left, half right); // svcmpne[_n_f16] + /// public static unsafe Vector CompareUnordered(Vector left, half right); // svcmpuo[_n_f16] + /// public static unsafe half ConditionalExtractAfterLastActiveElement(Vector mask, half defaultValue, Vector data); // svclasta[_n_f16] + /// public static unsafe half ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, half defaultScalar, Vector data); // svclasta[_n_f16] + /// public static unsafe half ConditionalExtractLastActiveElement(Vector mask, half defaultValue, Vector data); // svclastb[_n_f16] + /// public static unsafe half ConditionalExtractLastActiveElementAndReplicate(Vector mask, half fallback, Vector data); // svclastb[_n_f16] + /// public static unsafe Vector Divide(Vector left, half right); // svdiv[_n_f16]_m or svdiv[_n_f16]_x or svdiv[_n_f16]_z + /// public static unsafe Vector Max(Vector left, half right); // svmax[_n_f16]_m or svmax[_n_f16]_x or svmax[_n_f16]_z + /// public static unsafe Vector MaxNumber(Vector left, half right); // svmaxnm[_n_f16]_m or svmaxnm[_n_f16]_x or svmaxnm[_n_f16]_z + /// public static unsafe Vector Min(Vector left, half right); // svmin[_n_f16]_m or svmin[_n_f16]_x or svmin[_n_f16]_z + /// public static unsafe Vector MinNumber(Vector left, half right); // svminnm[_n_f16]_m or svminnm[_n_f16]_x or svminnm[_n_f16]_z + /// public static unsafe Vector Multiply(Vector left, half right); // svmul[_n_f16]_m or svmul[_n_f16]_x or svmul[_n_f16]_z + /// public static unsafe Vector MultiplyAdd(Vector addend, Vector left, half right); // svmla[_n_f16]_m or svmla[_n_f16]_x or svmla[_n_f16]_z + /// public static unsafe Vector MultiplyAddNegated(Vector addend, Vector left, half right); // svnmla[_n_f16]_m or svnmla[_n_f16]_x or svnmla[_n_f16]_z + /// public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, half op3); // svmlalb[_n_f32] + /// public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, half op3); // svmlalt[_n_f32] + /// public static unsafe Vector MultiplyExtended(Vector left, half right); // svmulx[_n_f16]_m or svmulx[_n_f16]_x or svmulx[_n_f16]_z + /// public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, half right); // svmls[_n_f16]_m or svmls[_n_f16]_x or svmls[_n_f16]_z + /// public static unsafe Vector MultiplySubtractNegated(Vector minuend, Vector left, half right); // svnmls[_n_f16]_m or svnmls[_n_f16]_x or svnmls[_n_f16]_z + /// public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, half op3); // svmlslb[_n_f32] + /// public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, half op3); // svmlslt[_n_f32] + /// public static unsafe Vector Subtract(Vector left, half right); // svsub[_n_f16]_m or svsub[_n_f16]_x or svsub[_n_f16]_z + /// Total Maybe: 33 + + /// Rejected: + /// public static unsafe ulong CountElementsInAFullVector(Vector value); // svlen[_f16] + /// public static unsafe Vector CreateTrueMaskHalf(); // svptrue_b8 + /// public static unsafe Vector DivideReversed(Vector left, Vector right); // svdivr[_f16]_m or svdivr[_f16]_x or svdivr[_f16]_z + /// public static unsafe Vector 
DivideReversed(Vector left, half right); // svdivr[_n_f16]_m or svdivr[_n_f16]_x or svdivr[_n_f16]_z + /// public static unsafe Vector DuplicateSelectedScalarToVector(half value); // svdup[_n]_f16 or svdup[_n]_f16_m or svdup[_n]_f16_x or svdup[_n]_f16_z + /// public static unsafe Vector LoadVector(Vector mask, half* address, long vnum); // svld1_vnum[_f16] + /// public static unsafe Vector LoadVectorFirstFaulting(Vector mask, half* address, long vnum); // svldff1_vnum[_f16] + /// public static unsafe Vector LoadVectorNonFaulting(half* address, long vnum); // svldnf1_vnum[_f16] + /// public static unsafe Vector LoadVectorNonTemporal(Vector mask, half* address, long vnum); // svldnt1_vnum[_f16] + /// public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, half* address, long vnum); // svld2_vnum[_f16] + /// public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, half* address, long vnum); // svld3_vnum[_f16] + /// public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, half* address, long vnum); // svld4_vnum[_f16] + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmad[_f16]_m or svmad[_f16]_x or svmad[_f16]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, half op3); // svmad[_n_f16]_m or svmad[_n_f16]_x or svmad[_n_f16]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmsb[_f16]_m or svmsb[_f16]_x or svmsb[_f16]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, half op3); // svmsb[_n_f16]_m or svmsb[_n_f16]_x or svmsb[_n_f16]_z + /// public static unsafe Vector NegateMultiplyAddMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svnmad[_f16]_m or svnmad[_f16]_x or svnmad[_f16]_z + /// public static unsafe Vector NegateMultiplyAddMultiplicandFirst(Vector op1, Vector op2, half op3); // svnmad[_n_f16]_m or svnmad[_n_f16]_x or svnmad[_n_f16]_z + /// public static unsafe Vector NegateMultiplySubtractMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svnmsb[_f16]_m or svnmsb[_f16]_x or svnmsb[_f16]_z + /// public static unsafe Vector NegateMultiplySubtractMultiplicandFirst(Vector op1, Vector op2, half op3); // svnmsb[_n_f16]_m or svnmsb[_n_f16]_x or svnmsb[_n_f16]_z + /// public static unsafe Vector RoundUsingCurrentRoundingModeExact(Vector value); // svrintx[_f16]_m or svrintx[_f16]_x or svrintx[_f16]_z + /// public static unsafe Vector RoundUsingCurrentRoundingModeInexact(Vector value); // svrinti[_f16]_m or svrinti[_f16]_x or svrinti[_f16]_z + /// public static unsafe Vector Scale(Vector left, short right); // svscale[_n_f16]_m or svscale[_n_f16]_x or svscale[_n_f16]_z + /// public static unsafe void Store(Vector mask, half* base, long vnum, Vector data); // svst1_vnum[_f16] + /// public static unsafe void Store(Vector mask, half* base, long vnum, (Vector data1, Vector data2)); // svst2_vnum[_f16] + /// public static unsafe void Store(Vector mask, half* base, long vnum, (Vector data1, Vector data2, Vector data3)); // svst3_vnum[_f16] + /// public static unsafe void Store(Vector mask, half* base, long vnum, (Vector data1, Vector data2, Vector data3, Vector data4)); // svst4_vnum[_f16] + /// public static unsafe void StoreNonTemporal(Vector mask, half* base, long vnum, Vector data); // svstnt1_vnum[_f16] + /// public static unsafe Vector SubtractReversed(Vector left, Vector right); // svsubr[_f16]_m or svsubr[_f16]_x or svsubr[_f16]_z + 
/// public static unsafe Vector SubtractReversed(Vector left, half right); // svsubr[_n_f16]_m or svsubr[_n_f16]_x or svsubr[_n_f16]_z + /// Total Rejected: 30 + + /// Total ACLE covered across API: 360 + diff --git a/sve_api/out_api/apiraw_FEAT_I8MM__.cs b/sve_api/out_api/apiraw_FEAT_I8MM__.cs new file mode 100644 index 0000000000000..e8a86b2f7dfb6 --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_I8MM__.cs @@ -0,0 +1,80 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class SveI8mm : AdvSimd /// Feature: FEAT_I8MM +{ + + public static unsafe Vector DotProductSignedUnsigned(Vector op1, Vector op2, Vector op3); // USDOT // MOVPRFX + + public static unsafe Vector DotProductSignedUnsigned(Vector op1, Vector op2, Vector op3, ulong imm_index); // SUDOT // MOVPRFX + + public static unsafe Vector DotProductUnsignedSigned(Vector op1, Vector op2, Vector op3); // USDOT // MOVPRFX + + public static unsafe Vector DotProductUnsignedSigned(Vector op1, Vector op2, Vector op3, ulong imm_index); // USDOT // MOVPRFX + + /// T: [int, sbyte], [uint, byte] + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3); // SMMLA or UMMLA // MOVPRFX + + public static unsafe Vector MatrixMultiplyAccumulateUnsignedSigned(Vector op1, Vector op2, Vector op3); // USMMLA // MOVPRFX + + /// total method signatures: 6 + + + /// Optional Entries: + + public static unsafe Vector DotProductSignedUnsigned(Vector op1, Vector op2, byte op3); // USDOT // MOVPRFX + + public static unsafe Vector DotProductUnsignedSigned(Vector op1, Vector op2, sbyte op3); // USDOT // MOVPRFX + + /// total optional method signatures: 2 + +} + + +/// Full API +public abstract partial class SveI8mm : AdvSimd /// Feature: FEAT_I8MM +{ + /// DotProductSignedUnsigned : Dot product (signed × unsigned) + + /// svint32_t svsudot[_s32](svint32_t op1, svint8_t op2, svuint8_t op3) : "USDOT Ztied1.S, Zop3.B, Zop2.B" or "MOVPRFX Zresult, Zop1; USDOT Zresult.S, Zop3.B, Zop2.B" + public static unsafe Vector DotProductSignedUnsigned(Vector op1, Vector op2, Vector op3); + + /// svint32_t svsudot_lane[_s32](svint32_t op1, svint8_t op2, svuint8_t op3, uint64_t imm_index) : "SUDOT Ztied1.S, Zop2.B, Zop3.B[imm_index]" or "MOVPRFX Zresult, Zop1; SUDOT Zresult.S, Zop2.B, Zop3.B[imm_index]" + public static unsafe Vector DotProductSignedUnsigned(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// DotProductUnsignedSigned : Dot product (unsigned × signed) + + /// svint32_t svusdot[_s32](svint32_t op1, svuint8_t op2, svint8_t op3) : "USDOT Ztied1.S, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; USDOT Zresult.S, Zop2.B, Zop3.B" + public static unsafe Vector DotProductUnsignedSigned(Vector op1, Vector op2, Vector op3); + + /// svint32_t svusdot_lane[_s32](svint32_t op1, svuint8_t op2, svint8_t op3, uint64_t imm_index) : "USDOT Ztied1.S, Zop2.B, Zop3.B[imm_index]" or "MOVPRFX Zresult, Zop1; USDOT Zresult.S, Zop2.B, Zop3.B[imm_index]" + public static unsafe Vector DotProductUnsignedSigned(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// MatrixMultiplyAccumulate : Matrix multiply-accumulate + + /// svint32_t svmmla[_s32](svint32_t op1, svint8_t op2, svint8_t op3) : "SMMLA Ztied1.S, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SMMLA Zresult.S, Zop2.B, Zop3.B" + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3); + + /// svuint32_t svmmla[_u32](svuint32_t op1, svuint8_t op2, svuint8_t op3) : "UMMLA Ztied1.S, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; 
UMMLA Zresult.S, Zop2.B, Zop3.B" + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3); + + + /// MatrixMultiplyAccumulateUnsignedSigned : Matrix multiply-accumulate (unsigned × signed) + + /// svint32_t svusmmla[_s32](svint32_t op1, svuint8_t op2, svint8_t op3) : "USMMLA Ztied1.S, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; USMMLA Zresult.S, Zop2.B, Zop3.B" + public static unsafe Vector MatrixMultiplyAccumulateUnsignedSigned(Vector op1, Vector op2, Vector op3); + + + /// total method signatures: 7 + /// total method names: 4 +} + + /// Optional Entries: + /// public static unsafe Vector DotProductSignedUnsigned(Vector op1, Vector op2, byte op3); // svsudot[_n_s32] + /// public static unsafe Vector DotProductUnsignedSigned(Vector op1, Vector op2, sbyte op3); // svusdot[_n_s32] + /// Total Maybe: 2 + + /// Total ACLE covered across API: 9 + diff --git a/sve_api/out_api/apiraw_FEAT_SHA3__.cs b/sve_api/out_api/apiraw_FEAT_SHA3__.cs new file mode 100644 index 0000000000000..ad75042a752ef --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SHA3__.cs @@ -0,0 +1,97 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sha3 : AdvSimd /// Feature: FEAT_SHA3 +{ + + /// T: byte, ushort, uint, ulong, sbyte, short, int, long + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask); // BCAX + + public static unsafe Vector128 BitwiseRotateLeftBy1AndXor(Vector128 a, Vector128 b); // RAX1 + + /// T: byte, ushort, uint, ulong, sbyte, short, int, long + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3); // EOR3 + + public static unsafe Vector128 XorRotateRight(Vector128 left, Vector128 right, [ConstantExpected] byte count); // XAR + + /// total method signatures: 4 + +} + + +/// Full API +public abstract partial class Sha3 : AdvSimd /// Feature: FEAT_SHA3 +{ + /// BitwiseClearXor : Bit Clear and Exclusive OR performs a bitwise AND of the 128-bit vector in a source SIMD&FP register and the complement of the vector in another source SIMD&FP register, then performs a bitwise exclusive OR of the resulting vector and the vector in a third source SIMD&FP register, and writes the result to the destination SIMD&FP register. 
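+
+ /// Example (a minimal illustrative sketch; the xor/value/mask operand order follows the
+ /// proposal above and is an assumption, not a committed API). Per bit, BCAX computes
+ /// result = xor ^ (value & ~mask); the same value can be built today from the existing
+ /// AdvSimd surface, which BCAX fuses into a single instruction:
+ ///   static Vector128<uint> BitwiseClearXorRef(Vector128<uint> x, Vector128<uint> v, Vector128<uint> m)
+ ///       => AdvSimd.Xor(x, AdvSimd.BitwiseClear(v, m));   // x ^ (v & ~m)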
+ + /// uint8x16_t vbcaxq_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) : "BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask); + + /// uint16x8_t vbcaxq_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) : "BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask); + + /// uint32x4_t vbcaxq_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) : "BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask); + + /// uint64x2_t vbcaxq_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) : "BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask); + + /// int8x16_t vbcaxq_s8(int8x16_t a, int8x16_t b, int8x16_t c) : "BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask); + + /// int16x8_t vbcaxq_s16(int16x8_t a, int16x8_t b, int16x8_t c) : "BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask); + + /// int32x4_t vbcaxq_s32(int32x4_t a, int32x4_t b, int32x4_t c) : "BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask); + + /// int64x2_t vbcaxq_s64(int64x2_t a, int64x2_t b, int64x2_t c) : "BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask); + + + /// BitwiseRotateLeftBy1AndXor : Rotate and Exclusive OR rotates each 64-bit element of the 128-bit vector in a source SIMD&FP register left by 1, performs a bitwise exclusive OR of the resulting 128-bit vector and the vector in another source SIMD&FP register, and writes the result to the destination SIMD&FP register. + + /// uint64x2_t vrax1q_u64(uint64x2_t a, uint64x2_t b) : "RAX1 Vd.2D,Vn.2D,Vm.2D" + public static unsafe Vector128 BitwiseRotateLeftBy1AndXor(Vector128 a, Vector128 b); + + + /// Xor : Three-way Exclusive OR performs a three-way exclusive OR of the values in the three source SIMD&FP registers, and writes the result to the destination SIMD&FP register. 
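+
+ /// Example (a minimal illustrative sketch, assuming the Xor overloads proposed above).
+ /// EOR3 folds two dependent XORs into one instruction; a reference expansion using the
+ /// existing AdvSimd surface is:
+ ///   static Vector128<ulong> Xor3Ref(Vector128<ulong> a, Vector128<ulong> b, Vector128<ulong> c)
+ ///       => AdvSimd.Xor(AdvSimd.Xor(a, b), c);   // (a ^ b) ^ c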
+ + /// uint8x16_t veor3q_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) : "EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3); + + /// uint16x8_t veor3q_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) : "EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3); + + /// uint32x4_t veor3q_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) : "EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3); + + /// uint64x2_t veor3q_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) : "EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3); + + /// int8x16_t veor3q_s8(int8x16_t a, int8x16_t b, int8x16_t c) : "EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3); + + /// int16x8_t veor3q_s16(int16x8_t a, int16x8_t b, int16x8_t c) : "EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3); + + /// int32x4_t veor3q_s32(int32x4_t a, int32x4_t b, int32x4_t c) : "EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3); + + /// int64x2_t veor3q_s64(int64x2_t a, int64x2_t b, int64x2_t c) : "EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B" + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3); + + + /// XorRotateRight : Exclusive OR and Rotate performs a bitwise exclusive OR of the 128-bit vectors in the two source SIMD&FP registers, rotates each 64-bit element of the resulting 128-bit vector right by the value specified by a 6-bit immediate value, and writes the result to the destination SIMD&FP register. + + /// uint64x2_t vxarq_u64(uint64x2_t a, uint64x2_t b, const int imm6) : "XAR Vd.2D,Vn.2D,Vm.2D,imm6" + public static unsafe Vector128 XorRotateRight(Vector128 left, Vector128 right, [ConstantExpected] byte count); + + + /// total method signatures: 18 + /// total method names: 4 +} + + + /// Total ACLE covered across API: 18 + diff --git a/sve_api/out_api/apiraw_FEAT_SM4__.cs b/sve_api/out_api/apiraw_FEAT_SM4__.cs new file mode 100644 index 0000000000000..c927191afcb96 --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SM4__.cs @@ -0,0 +1,37 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sm4 : AdvSimd /// Feature: FEAT_SM4 +{ + + public static unsafe Vector128 Sm4EncryptionAndDecryption(Vector128 a, Vector128 b); // SM4E + + public static unsafe Vector128 Sm4KeyUpdates(Vector128 a, Vector128 b); // SM4EKEY + + /// total method signatures: 2 + +} + + +/// Full API +public abstract partial class Sm4 : AdvSimd /// Feature: FEAT_SM4 +{ + /// Sm4EncryptionAndDecryption : SM4 Encode takes input data as a 128-bit vector from the first source SIMD&FP register, and four iterations of the round key held as the elements of the 128-bit vector in the second source SIMD&FP register. It encrypts the data by four rounds, in accordance with the SM4 standard, returning the 128-bit result to the destination SIMD&FP register. 
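+
+ /// Usage sketch (illustrative only; assumes the Sm4EncryptionAndDecryption name proposed above,
+ /// and a hypothetical roundKeys array of eight Vector128<uint> values, four round keys each).
+ /// Each call advances the state by four SM4 rounds, so a full 32-round block encryption chains
+ /// eight calls, one per group of four round keys:
+ ///   for (int i = 0; i < 8; i++)
+ ///       state = Sm4.Sm4EncryptionAndDecryption(state, roundKeys[i]);   // 4 rounds per call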
+ + /// uint32x4_t vsm4eq_u32(uint32x4_t a, uint32x4_t b) : "SM4E Vd.4S,Vn.4S" + public static unsafe Vector128 Sm4EncryptionAndDecryption(Vector128 a, Vector128 b); + + + /// Sm4KeyUpdates : SM4 Key takes an input as a 128-bit vector from the first source SIMD&FP register and a 128-bit constant from the second SIMD&FP register. It derives four iterations of the output key, in accordance with the SM4 standard, returning the 128-bit result to the destination SIMD&FP register. + + /// uint32x4_t vsm4ekeyq_u32(uint32x4_t a, uint32x4_t b) : "SM4EKEY Vd.4S,Vn.4S,Vm.4S" + public static unsafe Vector128 Sm4KeyUpdates(Vector128 a, Vector128 b); + + + /// total method signatures: 2 + /// total method names: 2 +} + + + /// Total ACLE covered across API: 2 + diff --git a/sve_api/out_api/apiraw_FEAT_SVE2__.cs b/sve_api/out_api/apiraw_FEAT_SVE2__.cs new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/sve_api/out_api/apiraw_FEAT_SVE2__bitmanipulate.cs b/sve_api/out_api/apiraw_FEAT_SVE2__bitmanipulate.cs new file mode 100644 index 0000000000000..ec97b93cce3e3 --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE2__bitmanipulate.cs @@ -0,0 +1,236 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: bitmanipulate +{ + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right); // EORBT // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right); // EORTB // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector MoveWideningLower(Vector value); // SSHLLB or USHLLB + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector MoveWideningUpper(Vector value); // SSHLLT or USHLLT + + /// T: [float, uint], [double, ulong], [sbyte, byte], [short, ushort], [int, uint], [long, ulong] + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); // TBL + + /// T: byte, ushort, uint, ulong + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); // TBL + + /// T: [float, uint], [double, ulong], [sbyte, byte], [short, ushort], [int, uint], [long, ulong] + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); // TBX + + /// T: byte, ushort, uint, ulong + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); // TBX + + /// total method signatures: 8 + + + /// Optional Entries: + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, T right); // EORBT // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, T right); // EORTB // MOVPRFX + + /// total optional method signatures: 2 + +} + + +/// Full API +public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: bitmanipulate +{ + /// InterleavingXorLowerUpper : Interleaving exclusive OR (bottom, top) + + /// svint8_t sveorbt[_s8](svint8_t odd, svint8_t op1, svint8_t op2) : "EORBT Ztied.B, Zop1.B, Zop2.B" or "MOVPRFX Zresult, Zodd; EORBT Zresult.B, 
Zop1.B, Zop2.B" + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right); + + /// svint16_t sveorbt[_s16](svint16_t odd, svint16_t op1, svint16_t op2) : "EORBT Ztied.H, Zop1.H, Zop2.H" or "MOVPRFX Zresult, Zodd; EORBT Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right); + + /// svint32_t sveorbt[_s32](svint32_t odd, svint32_t op1, svint32_t op2) : "EORBT Ztied.S, Zop1.S, Zop2.S" or "MOVPRFX Zresult, Zodd; EORBT Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right); + + /// svint64_t sveorbt[_s64](svint64_t odd, svint64_t op1, svint64_t op2) : "EORBT Ztied.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zodd; EORBT Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right); + + /// svuint8_t sveorbt[_u8](svuint8_t odd, svuint8_t op1, svuint8_t op2) : "EORBT Ztied.B, Zop1.B, Zop2.B" or "MOVPRFX Zresult, Zodd; EORBT Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right); + + /// svuint16_t sveorbt[_u16](svuint16_t odd, svuint16_t op1, svuint16_t op2) : "EORBT Ztied.H, Zop1.H, Zop2.H" or "MOVPRFX Zresult, Zodd; EORBT Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right); + + /// svuint32_t sveorbt[_u32](svuint32_t odd, svuint32_t op1, svuint32_t op2) : "EORBT Ztied.S, Zop1.S, Zop2.S" or "MOVPRFX Zresult, Zodd; EORBT Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right); + + /// svuint64_t sveorbt[_u64](svuint64_t odd, svuint64_t op1, svuint64_t op2) : "EORBT Ztied.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zodd; EORBT Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right); + + + /// InterleavingXorUpperLower : Interleaving exclusive OR (top, bottom) + + /// svint8_t sveortb[_s8](svint8_t even, svint8_t op1, svint8_t op2) : "EORTB Ztied.B, Zop1.B, Zop2.B" or "MOVPRFX Zresult, Zeven; EORTB Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right); + + /// svint16_t sveortb[_s16](svint16_t even, svint16_t op1, svint16_t op2) : "EORTB Ztied.H, Zop1.H, Zop2.H" or "MOVPRFX Zresult, Zeven; EORTB Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right); + + /// svint32_t sveortb[_s32](svint32_t even, svint32_t op1, svint32_t op2) : "EORTB Ztied.S, Zop1.S, Zop2.S" or "MOVPRFX Zresult, Zeven; EORTB Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right); + + /// svint64_t sveortb[_s64](svint64_t even, svint64_t op1, svint64_t op2) : "EORTB Ztied.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zeven; EORTB Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right); + + /// svuint8_t sveortb[_u8](svuint8_t even, svuint8_t op1, svuint8_t op2) : "EORTB Ztied.B, Zop1.B, Zop2.B" or "MOVPRFX Zresult, Zeven; EORTB Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right); + + /// svuint16_t sveortb[_u16](svuint16_t even, svuint16_t op1, svuint16_t op2) : "EORTB Ztied.H, Zop1.H, Zop2.H" or "MOVPRFX Zresult, 
Zeven; EORTB Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right); + + /// svuint32_t sveortb[_u32](svuint32_t even, svuint32_t op1, svuint32_t op2) : "EORTB Ztied.S, Zop1.S, Zop2.S" or "MOVPRFX Zresult, Zeven; EORTB Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right); + + /// svuint64_t sveortb[_u64](svuint64_t even, svuint64_t op1, svuint64_t op2) : "EORTB Ztied.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zeven; EORTB Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right); + + + /// MoveWideningLower : Move long (bottom) + + /// svint16_t svmovlb[_s16](svint8_t op) : "SSHLLB Zresult.H, Zop.B, #0" + public static unsafe Vector MoveWideningLower(Vector value); + + /// svint32_t svmovlb[_s32](svint16_t op) : "SSHLLB Zresult.S, Zop.H, #0" + public static unsafe Vector MoveWideningLower(Vector value); + + /// svint64_t svmovlb[_s64](svint32_t op) : "SSHLLB Zresult.D, Zop.S, #0" + public static unsafe Vector MoveWideningLower(Vector value); + + /// svuint16_t svmovlb[_u16](svuint8_t op) : "USHLLB Zresult.H, Zop.B, #0" + public static unsafe Vector MoveWideningLower(Vector value); + + /// svuint32_t svmovlb[_u32](svuint16_t op) : "USHLLB Zresult.S, Zop.H, #0" + public static unsafe Vector MoveWideningLower(Vector value); + + /// svuint64_t svmovlb[_u64](svuint32_t op) : "USHLLB Zresult.D, Zop.S, #0" + public static unsafe Vector MoveWideningLower(Vector value); + + + /// MoveWideningUpper : Move long (top) + + /// svint16_t svmovlt[_s16](svint8_t op) : "SSHLLT Zresult.H, Zop.B, #0" + public static unsafe Vector MoveWideningUpper(Vector value); + + /// svint32_t svmovlt[_s32](svint16_t op) : "SSHLLT Zresult.S, Zop.H, #0" + public static unsafe Vector MoveWideningUpper(Vector value); + + /// svint64_t svmovlt[_s64](svint32_t op) : "SSHLLT Zresult.D, Zop.S, #0" + public static unsafe Vector MoveWideningUpper(Vector value); + + /// svuint16_t svmovlt[_u16](svuint8_t op) : "USHLLT Zresult.H, Zop.B, #0" + public static unsafe Vector MoveWideningUpper(Vector value); + + /// svuint32_t svmovlt[_u32](svuint16_t op) : "USHLLT Zresult.S, Zop.H, #0" + public static unsafe Vector MoveWideningUpper(Vector value); + + /// svuint64_t svmovlt[_u64](svuint32_t op) : "USHLLT Zresult.D, Zop.S, #0" + public static unsafe Vector MoveWideningUpper(Vector value); + + + /// VectorTableLookup : Table lookup in two-vector table + + /// svfloat32_t svtbl2[_f32](svfloat32x2_t data, svuint32_t indices) : "TBL Zresult.S, {Zdata0.S, Zdata1.S}, Zindices.S" + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); + + /// svfloat64_t svtbl2[_f64](svfloat64x2_t data, svuint64_t indices) : "TBL Zresult.D, {Zdata0.D, Zdata1.D}, Zindices.D" + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); + + /// svint8_t svtbl2[_s8](svint8x2_t data, svuint8_t indices) : "TBL Zresult.B, {Zdata0.B, Zdata1.B}, Zindices.B" + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); + + /// svint16_t svtbl2[_s16](svint16x2_t data, svuint16_t indices) : "TBL Zresult.H, {Zdata0.H, Zdata1.H}, Zindices.H" + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); + + /// svint32_t svtbl2[_s32](svint32x2_t data, svuint32_t indices) : "TBL Zresult.S, {Zdata0.S, Zdata1.S}, Zindices.S" + public static unsafe 
Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); + + /// svint64_t svtbl2[_s64](svint64x2_t data, svuint64_t indices) : "TBL Zresult.D, {Zdata0.D, Zdata1.D}, Zindices.D" + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); + + /// svuint8_t svtbl2[_u8](svuint8x2_t data, svuint8_t indices) : "TBL Zresult.B, {Zdata0.B, Zdata1.B}, Zindices.B" + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); + + /// svuint16_t svtbl2[_u16](svuint16x2_t data, svuint16_t indices) : "TBL Zresult.H, {Zdata0.H, Zdata1.H}, Zindices.H" + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); + + /// svuint32_t svtbl2[_u32](svuint32x2_t data, svuint32_t indices) : "TBL Zresult.S, {Zdata0.S, Zdata1.S}, Zindices.S" + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); + + /// svuint64_t svtbl2[_u64](svuint64x2_t data, svuint64_t indices) : "TBL Zresult.D, {Zdata0.D, Zdata1.D}, Zindices.D" + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices); + + + /// VectorTableLookupExtension : Table lookup in single-vector table (merging) + + /// svfloat32_t svtbx[_f32](svfloat32_t fallback, svfloat32_t data, svuint32_t indices) : "TBX Ztied.S, Zdata.S, Zindices.S" + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); + + /// svfloat64_t svtbx[_f64](svfloat64_t fallback, svfloat64_t data, svuint64_t indices) : "TBX Ztied.D, Zdata.D, Zindices.D" + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); + + /// svint8_t svtbx[_s8](svint8_t fallback, svint8_t data, svuint8_t indices) : "TBX Ztied.B, Zdata.B, Zindices.B" + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); + + /// svint16_t svtbx[_s16](svint16_t fallback, svint16_t data, svuint16_t indices) : "TBX Ztied.H, Zdata.H, Zindices.H" + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); + + /// svint32_t svtbx[_s32](svint32_t fallback, svint32_t data, svuint32_t indices) : "TBX Ztied.S, Zdata.S, Zindices.S" + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); + + /// svint64_t svtbx[_s64](svint64_t fallback, svint64_t data, svuint64_t indices) : "TBX Ztied.D, Zdata.D, Zindices.D" + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); + + /// svuint8_t svtbx[_u8](svuint8_t fallback, svuint8_t data, svuint8_t indices) : "TBX Ztied.B, Zdata.B, Zindices.B" + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); + + /// svuint16_t svtbx[_u16](svuint16_t fallback, svuint16_t data, svuint16_t indices) : "TBX Ztied.H, Zdata.H, Zindices.H" + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); + + /// svuint32_t svtbx[_u32](svuint32_t fallback, svuint32_t data, svuint32_t indices) : "TBX Ztied.S, Zdata.S, Zindices.S" + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); + + /// svuint64_t svtbx[_u64](svuint64_t fallback, svuint64_t data, svuint64_t indices) : "TBX Ztied.D, Zdata.D, Zindices.D" + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices); + + + /// total method 
signatures: 48 + /// total method names: 6 +} + + /// Optional Entries: + /// public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, sbyte right); // sveorbt[_n_s8] + /// public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, short right); // sveorbt[_n_s16] + /// public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, int right); // sveorbt[_n_s32] + /// public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, long right); // sveorbt[_n_s64] + /// public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, byte right); // sveorbt[_n_u8] + /// public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, ushort right); // sveorbt[_n_u16] + /// public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, uint right); // sveorbt[_n_u32] + /// public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, ulong right); // sveorbt[_n_u64] + /// public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, sbyte right); // sveortb[_n_s8] + /// public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, short right); // sveortb[_n_s16] + /// public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, int right); // sveortb[_n_s32] + /// public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, long right); // sveortb[_n_s64] + /// public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, byte right); // sveortb[_n_u8] + /// public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, ushort right); // sveortb[_n_u16] + /// public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, uint right); // sveortb[_n_u32] + /// public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, ulong right); // sveortb[_n_u64] + /// Total Maybe: 16 + + /// Total ACLE covered across API: 64 + diff --git a/sve_api/out_api/apiraw_FEAT_SVE2__bitwise.cs b/sve_api/out_api/apiraw_FEAT_SVE2__bitwise.cs new file mode 100644 index 0000000000000..d7da811560fa5 --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE2__bitwise.cs @@ -0,0 +1,962 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: bitwise +{ + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask); // BCAX // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right); // BSL // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right); // BSL1N // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right); // BSL2N // MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftArithmeticRounded(Vector value, Vector count); // SRSHL or SRSHLR // predicated, MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count); // SQRSHL or SQRSHLR // predicated, MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftArithmeticSaturate(Vector value, 
Vector count); // SQSHL or SQSHLR // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); // SLI + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count); // UQSHL or UQSHLR // predicated, MOVPRFX + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count); // SQSHLU // predicated, MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count); // SSHLLB or USHLLB + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count); // SSHLLT or USHLLT + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count); // URSHL or URSHLR // predicated, MOVPRFX + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count); // UQRSHL or UQRSHLR // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); // SRI + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count); // SSRA // MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); // SQSHRNB or UQSHRNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); // SQSHRNT or UQSHRNT + + /// T: [byte, short], [ushort, int], [uint, long] + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count); // SQSHRUNB + + /// T: [byte, short], [ushort, int], [uint, long] + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count); // SQSHRUNT + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count); // SRSHR // predicated, MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count); // SRSRA // MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long] + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); // SQRSHRNB + + /// T: [sbyte, short], [short, int], [int, long] + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); // SQRSHRNT + + /// T: [byte, short], [ushort, int], [uint, long] + public static unsafe Vector 
ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count); // SQRSHRUNB + + /// T: [byte, short], [ushort, int], [uint, long] + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count); // SQRSHRUNT + + /// T: byte, ushort, uint, ulong + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, [ConstantExpected] byte count); // USRA // MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count); // SHRNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); // SHRNT + + /// T: byte, ushort, uint, ulong + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count); // URSHR // predicated, MOVPRFX + + /// T: byte, ushort, uint, ulong + public static unsafe Vector ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count); // URSRA // MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count); // RSHRNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); // RSHRNT + + /// T: [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); // UQRSHRNB + + /// T: [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); // UQRSHRNT + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3); // EOR3 // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count); // XAR // MOVPRFX + + /// total method signatures: 37 + +} + + +/// Full API +public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: bitwise +{ + /// BitwiseClearXor : Bitwise clear and exclusive OR + + /// svint8_t svbcax[_s8](svint8_t op1, svint8_t op2, svint8_t op3) : "BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask); + + /// svint16_t svbcax[_s16](svint16_t op1, svint16_t op2, svint16_t op3) : "BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask); + + /// svint32_t svbcax[_s32](svint32_t op1, svint32_t op2, svint32_t op3) : "BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask); + + /// svint64_t svbcax[_s64](svint64_t 
op1, svint64_t op2, svint64_t op3) : "BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask); + + /// svuint8_t svbcax[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) : "BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask); + + /// svuint16_t svbcax[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) : "BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask); + + /// svuint32_t svbcax[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) : "BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask); + + /// svuint64_t svbcax[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) : "BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask); + + + /// BitwiseSelect : Bitwise select + + /// svint8_t svbsl[_s8](svint8_t op1, svint8_t op2, svint8_t op3) : "BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right); + + /// svint16_t svbsl[_s16](svint16_t op1, svint16_t op2, svint16_t op3) : "BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right); + + /// svint32_t svbsl[_s32](svint32_t op1, svint32_t op2, svint32_t op3) : "BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right); + + /// svint64_t svbsl[_s64](svint64_t op1, svint64_t op2, svint64_t op3) : "BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right); + + /// svuint8_t svbsl[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) : "BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right); + + /// svuint16_t svbsl[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) : "BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right); + + /// svuint32_t svbsl[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) : "BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right); + + /// svuint64_t svbsl[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) : "BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelect(Vector 
select, Vector left, Vector right); + + + /// BitwiseSelectLeftInverted : Bitwise select with first input inverted + + /// svint8_t svbsl1n[_s8](svint8_t op1, svint8_t op2, svint8_t op3) : "BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right); + + /// svint16_t svbsl1n[_s16](svint16_t op1, svint16_t op2, svint16_t op3) : "BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right); + + /// svint32_t svbsl1n[_s32](svint32_t op1, svint32_t op2, svint32_t op3) : "BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right); + + /// svint64_t svbsl1n[_s64](svint64_t op1, svint64_t op2, svint64_t op3) : "BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right); + + /// svuint8_t svbsl1n[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) : "BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right); + + /// svuint16_t svbsl1n[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) : "BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right); + + /// svuint32_t svbsl1n[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) : "BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right); + + /// svuint64_t svbsl1n[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) : "BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right); + + + /// BitwiseSelectRightInverted : Bitwise select with second input inverted + + /// svint8_t svbsl2n[_s8](svint8_t op1, svint8_t op2, svint8_t op3) : "BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right); + + /// svint16_t svbsl2n[_s16](svint16_t op1, svint16_t op2, svint16_t op3) : "BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right); + + /// svint32_t svbsl2n[_s32](svint32_t op1, svint32_t op2, svint32_t op3) : "BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right); + + /// svint64_t svbsl2n[_s64](svint64_t op1, svint64_t op2, svint64_t op3) : "BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, 
Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right); + + /// svuint8_t svbsl2n[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) : "BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right); + + /// svuint16_t svbsl2n[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) : "BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right); + + /// svuint32_t svbsl2n[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) : "BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right); + + /// svuint64_t svbsl2n[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) : "BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right); + + + /// ShiftArithmeticRounded : Rounding shift left + + /// svint8_t svrshl[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SRSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svrshl[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SRSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "SRSHLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; SRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svrshl[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; SRSHLR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector ShiftArithmeticRounded(Vector value, Vector count); + + /// svint16_t svrshl[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SRSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svrshl[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SRSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "SRSHLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; SRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svrshl[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; SRSHLR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector ShiftArithmeticRounded(Vector value, Vector count); + + /// svint32_t svrshl[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SRSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svrshl[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SRSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SRSHLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; SRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svrshl[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SRSHLR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector ShiftArithmeticRounded(Vector 
value, Vector count); + + /// svint64_t svrshl[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SRSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svrshl[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SRSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SRSHLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; SRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svrshl[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SRSHLR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector ShiftArithmeticRounded(Vector value, Vector count); + + + /// ShiftArithmeticRoundedSaturate : Saturating rounding shift left + + /// svint8_t svqrshl[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SQRSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SQRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svqrshl[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SQRSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "SQRSHLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; SQRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svqrshl[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SQRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; SQRSHLR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count); + + /// svint16_t svqrshl[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SQRSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SQRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svqrshl[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SQRSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "SQRSHLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; SQRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svqrshl[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SQRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; SQRSHLR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count); + + /// svint32_t svqrshl[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SQRSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SQRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svqrshl[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SQRSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SQRSHLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; SQRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svqrshl[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SQRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SQRSHLR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count); + + /// svint64_t svqrshl[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SQRSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SQRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svqrshl[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SQRSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SQRSHLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; SQRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svqrshl[_s64]_z(svbool_t pg, svint64_t 
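op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SQRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SQRSHLR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count);

A detail that is easy to miss in the SRSHL/SQRSHL comments above: the count operand is a signed vector, so each element shifts left for a non-negative count and right with rounding for a negative count, saturating on overflow in the SQRSHL case. A minimal scalar sketch of the per-element behaviour for the 8-bit form, assuming the usual ACLE semantics; the helper name is hypothetical and not part of the generated surface:

    // Illustrative sketch only: per-element model of SQRSHL (8-bit case),
    // assuming standard ACLE handling of out-of-range shift counts.
    static sbyte ShiftArithmeticRoundedSaturateScalar(sbyte value, sbyte count)
    {
        if (count >= 0)
        {
            // Left shift with signed saturation; 15 bits of headroom is
            // enough to catch overflow for any 8-bit input.
            long shifted = (long)value << Math.Min((int)count, 15);
            return (sbyte)Math.Clamp(shifted, sbyte.MinValue, sbyte.MaxValue);
        }
        // Negative count: rounding right shift (add half the rounding
        // increment before shifting).
        int n = Math.Min(-(int)count, 8);
        return (sbyte)(((long)value + (1L << (n - 1))) >> n);
    }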
+ + + /// ShiftArithmeticSaturate : Saturating shift left + + /// svint8_t svqshl[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SQSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SQSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svqshl[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SQSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "SQSHLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; SQSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svqshl[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SQSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; SQSHLR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector ShiftArithmeticSaturate(Vector value, Vector count); + + /// svint16_t svqshl[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SQSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SQSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svqshl[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SQSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "SQSHLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; SQSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svqshl[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SQSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; SQSHLR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector ShiftArithmeticSaturate(Vector value, Vector count); + + /// svint32_t svqshl[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SQSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SQSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svqshl[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SQSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SQSHLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; SQSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svqshl[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SQSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SQSHLR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector ShiftArithmeticSaturate(Vector value, Vector count); + + /// svint64_t svqshl[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SQSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SQSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svqshl[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SQSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SQSHLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; SQSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svqshl[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SQSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SQSHLR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector ShiftArithmeticSaturate(Vector value, Vector count); + + + /// ShiftLeftAndInsert : Shift left and insert + + /// svint8_t svsli[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) : "SLI Ztied1.B, Zop2.B, #imm3" + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + /// svint16_t
svsli[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) : "SLI Ztied1.H, Zop2.H, #imm3" + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + /// svint32_t svsli[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) : "SLI Ztied1.S, Zop2.S, #imm3" + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + /// svint64_t svsli[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) : "SLI Ztied1.D, Zop2.D, #imm3" + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + /// svuint8_t svsli[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) : "SLI Ztied1.B, Zop2.B, #imm3" + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + /// svuint16_t svsli[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) : "SLI Ztied1.H, Zop2.H, #imm3" + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + /// svuint32_t svsli[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) : "SLI Ztied1.S, Zop2.S, #imm3" + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + /// svuint64_t svsli[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) : "SLI Ztied1.D, Zop2.D, #imm3" + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + + /// ShiftLeftLogicalSaturate : Saturating shift left + + /// svuint8_t svqshl[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) : "UQSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UQSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svqshl[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) : "UQSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "UQSHLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; UQSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svqshl[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; UQSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; UQSHLR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count); + + /// svuint16_t svqshl[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2) : "UQSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UQSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svqshl[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) : "UQSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "UQSHLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; UQSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svqshl[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; UQSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; UQSHLR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count); + + /// svuint32_t svqshl[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) : "UQSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UQSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svqshl[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) : "UQSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "UQSHLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; UQSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svqshl[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) : "MOVPRFX 
Zresult.S, Pg/Z, Zop1.S; UQSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; UQSHLR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count); + + /// svuint64_t svqshl[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) : "UQSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UQSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svqshl[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) : "UQSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "UQSHLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; UQSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svqshl[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; UQSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; UQSHLR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count); + + + /// ShiftLeftLogicalSaturateUnsigned : Saturating shift left unsigned + + /// svuint8_t svqshlu[_n_s8]_m(svbool_t pg, svint8_t op1, uint64_t imm2) : "SQSHLU Ztied1.B, Pg/M, Ztied1.B, #imm2" or "MOVPRFX Zresult, Zop1; SQSHLU Zresult.B, Pg/M, Zresult.B, #imm2" + /// svuint8_t svqshlu[_n_s8]_x(svbool_t pg, svint8_t op1, uint64_t imm2) : "SQSHLU Ztied1.B, Pg/M, Ztied1.B, #imm2" or "MOVPRFX Zresult, Zop1; SQSHLU Zresult.B, Pg/M, Zresult.B, #imm2" + /// svuint8_t svqshlu[_n_s8]_z(svbool_t pg, svint8_t op1, uint64_t imm2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SQSHLU Zresult.B, Pg/M, Zresult.B, #imm2" + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count); + + /// svuint16_t svqshlu[_n_s16]_m(svbool_t pg, svint16_t op1, uint64_t imm2) : "SQSHLU Ztied1.H, Pg/M, Ztied1.H, #imm2" or "MOVPRFX Zresult, Zop1; SQSHLU Zresult.H, Pg/M, Zresult.H, #imm2" + /// svuint16_t svqshlu[_n_s16]_x(svbool_t pg, svint16_t op1, uint64_t imm2) : "SQSHLU Ztied1.H, Pg/M, Ztied1.H, #imm2" or "MOVPRFX Zresult, Zop1; SQSHLU Zresult.H, Pg/M, Zresult.H, #imm2" + /// svuint16_t svqshlu[_n_s16]_z(svbool_t pg, svint16_t op1, uint64_t imm2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SQSHLU Zresult.H, Pg/M, Zresult.H, #imm2" + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count); + + /// svuint32_t svqshlu[_n_s32]_m(svbool_t pg, svint32_t op1, uint64_t imm2) : "SQSHLU Ztied1.S, Pg/M, Ztied1.S, #imm2" or "MOVPRFX Zresult, Zop1; SQSHLU Zresult.S, Pg/M, Zresult.S, #imm2" + /// svuint32_t svqshlu[_n_s32]_x(svbool_t pg, svint32_t op1, uint64_t imm2) : "SQSHLU Ztied1.S, Pg/M, Ztied1.S, #imm2" or "MOVPRFX Zresult, Zop1; SQSHLU Zresult.S, Pg/M, Zresult.S, #imm2" + /// svuint32_t svqshlu[_n_s32]_z(svbool_t pg, svint32_t op1, uint64_t imm2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SQSHLU Zresult.S, Pg/M, Zresult.S, #imm2" + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count); + + /// svuint64_t svqshlu[_n_s64]_m(svbool_t pg, svint64_t op1, uint64_t imm2) : "SQSHLU Ztied1.D, Pg/M, Ztied1.D, #imm2" or "MOVPRFX Zresult, Zop1; SQSHLU Zresult.D, Pg/M, Zresult.D, #imm2" + /// svuint64_t svqshlu[_n_s64]_x(svbool_t pg, svint64_t op1, uint64_t imm2) : "SQSHLU Ztied1.D, Pg/M, Ztied1.D, #imm2" or "MOVPRFX Zresult, Zop1; SQSHLU Zresult.D, Pg/M, Zresult.D, #imm2" + /// svuint64_t svqshlu[_n_s64]_z(svbool_t pg, svint64_t op1, uint64_t imm2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SQSHLU Zresult.D, Pg/M, Zresult.D, #imm2" + public static unsafe Vector 
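ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count);

SQSHLU is the one shift in this family that crosses signedness: a signed input is shifted left and saturated into the unsigned range of the same width, so negative inputs clamp to zero and overflow clamps to the type maximum. A scalar sketch of the 8-bit form, again assuming standard ACLE semantics; the helper name is hypothetical and not part of the generated surface:

    // Illustrative sketch only: per-element model of SQSHLU (8-bit case),
    // signed input, unsigned saturated output.
    static byte ShiftLeftLogicalSaturateUnsignedScalar(sbyte value, byte count)
    {
        if (value < 0)
        {
            return 0;                                        // negative inputs saturate to 0
        }
        long shifted = (long)value << Math.Min((int)count, 15);
        return (byte)Math.Min(shifted, (long)byte.MaxValue); // overflow saturates to 255
    }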
+ + + /// ShiftLeftLogicalWideningEven : Shift left long (bottom) + + /// svint16_t svshllb[_n_s16](svint8_t op1, uint64_t imm2) : "SSHLLB Zresult.H, Zop1.B, #imm2" + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count); + + /// svint32_t svshllb[_n_s32](svint16_t op1, uint64_t imm2) : "SSHLLB Zresult.S, Zop1.H, #imm2" + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count); + + /// svint64_t svshllb[_n_s64](svint32_t op1, uint64_t imm2) : "SSHLLB Zresult.D, Zop1.S, #imm2" + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count); + + /// svuint16_t svshllb[_n_u16](svuint8_t op1, uint64_t imm2) : "USHLLB Zresult.H, Zop1.B, #imm2" + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count); + + /// svuint32_t svshllb[_n_u32](svuint16_t op1, uint64_t imm2) : "USHLLB Zresult.S, Zop1.H, #imm2" + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count); + + /// svuint64_t svshllb[_n_u64](svuint32_t op1, uint64_t imm2) : "USHLLB Zresult.D, Zop1.S, #imm2" + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count); + + + /// ShiftLeftLogicalWideningOdd : Shift left long (top) + + /// svint16_t svshllt[_n_s16](svint8_t op1, uint64_t imm2) : "SSHLLT Zresult.H, Zop1.B, #imm2" + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count); + + /// svint32_t svshllt[_n_s32](svint16_t op1, uint64_t imm2) : "SSHLLT Zresult.S, Zop1.H, #imm2" + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count); + + /// svint64_t svshllt[_n_s64](svint32_t op1, uint64_t imm2) : "SSHLLT Zresult.D, Zop1.S, #imm2" + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count); + + /// svuint16_t svshllt[_n_u16](svuint8_t op1, uint64_t imm2) : "USHLLT Zresult.H, Zop1.B, #imm2" + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count); + + /// svuint32_t svshllt[_n_u32](svuint16_t op1, uint64_t imm2) : "USHLLT Zresult.S, Zop1.H, #imm2" + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count); + + /// svuint64_t svshllt[_n_u64](svuint32_t op1, uint64_t imm2) : "USHLLT Zresult.D, Zop1.S, #imm2" + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count); + + + /// ShiftLogicalRounded : Rounding shift left + + /// svuint8_t svrshl[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) : "URSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; URSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svrshl[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) : "URSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "URSHLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; URSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svrshl[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; URSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; URSHLR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count); + + /// svuint16_t svrshl[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2) : "URSHL Ztied1.H, Pg/M,
Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; URSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svrshl[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) : "URSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "URSHLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; URSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svrshl[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; URSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; URSHLR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count); + + /// svuint32_t svrshl[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) : "URSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; URSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svrshl[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) : "URSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "URSHLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; URSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svrshl[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; URSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; URSHLR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count); + + /// svuint64_t svrshl[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) : "URSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; URSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svrshl[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) : "URSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "URSHLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; URSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svrshl[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; URSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; URSHLR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count); + + + /// ShiftLogicalRoundedSaturate : Saturating rounding shift left + + /// svuint8_t svqrshl[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) : "UQRSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UQRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svqrshl[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) : "UQRSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "UQRSHLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; UQRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svqrshl[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; UQRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; UQRSHLR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count); + + /// svuint16_t svqrshl[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2) : "UQRSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UQRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svqrshl[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) : "UQRSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "UQRSHLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; UQRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svqrshl[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; UQRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; UQRSHLR 
Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count); + + /// svuint32_t svqrshl[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) : "UQRSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UQRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svqrshl[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) : "UQRSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "UQRSHLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; UQRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svqrshl[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; UQRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; UQRSHLR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count); + + /// svuint64_t svqrshl[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) : "UQRSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UQRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svqrshl[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) : "UQRSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "UQRSHLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; UQRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svqrshl[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; UQRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; UQRSHLR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count); + + + /// ShiftRightAndInsert : Shift right and insert + + /// svint8_t svsri[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) : "SRI Ztied1.B, Zop2.B, #imm3" + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + /// svint16_t svsri[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) : "SRI Ztied1.H, Zop2.H, #imm3" + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + /// svint32_t svsri[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) : "SRI Ztied1.S, Zop2.S, #imm3" + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + /// svint64_t svsri[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) : "SRI Ztied1.D, Zop2.D, #imm3" + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + /// svuint8_t svsri[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) : "SRI Ztied1.B, Zop2.B, #imm3" + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + /// svuint16_t svsri[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) : "SRI Ztied1.H, Zop2.H, #imm3" + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + /// svuint32_t svsri[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) : "SRI Ztied1.S, Zop2.S, #imm3" + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + /// svuint64_t svsri[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) : "SRI Ztied1.D, Zop2.D, #imm3" + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); + + + /// ShiftRightArithmeticAdd : Shift right and accumulate + + /// svint8_t svsra[_n_s8](svint8_t op1, svint8_t op2, 
uint64_t imm3) : "SSRA Ztied1.B, Zop2.B, #imm3" or "MOVPRFX Zresult, Zop1; SSRA Zresult.B, Zop2.B, #imm3" + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + /// svint16_t svsra[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) : "SSRA Ztied1.H, Zop2.H, #imm3" or "MOVPRFX Zresult, Zop1; SSRA Zresult.H, Zop2.H, #imm3" + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + /// svint32_t svsra[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) : "SSRA Ztied1.S, Zop2.S, #imm3" or "MOVPRFX Zresult, Zop1; SSRA Zresult.S, Zop2.S, #imm3" + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + /// svint64_t svsra[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) : "SSRA Ztied1.D, Zop2.D, #imm3" or "MOVPRFX Zresult, Zop1; SSRA Zresult.D, Zop2.D, #imm3" + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + + /// ShiftRightArithmeticNarrowingSaturateEven : Saturating shift right narrow (bottom) + + /// svint8_t svqshrnb[_n_s16](svint16_t op1, uint64_t imm2) : "SQSHRNB Zresult.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); + + /// svint16_t svqshrnb[_n_s32](svint32_t op1, uint64_t imm2) : "SQSHRNB Zresult.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); + + /// svint32_t svqshrnb[_n_s64](svint64_t op1, uint64_t imm2) : "SQSHRNB Zresult.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); + + /// svuint8_t svqshrnb[_n_u16](svuint16_t op1, uint64_t imm2) : "UQSHRNB Zresult.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); + + /// svuint16_t svqshrnb[_n_u32](svuint32_t op1, uint64_t imm2) : "UQSHRNB Zresult.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); + + /// svuint32_t svqshrnb[_n_u64](svuint64_t op1, uint64_t imm2) : "UQSHRNB Zresult.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); + + + /// ShiftRightArithmeticNarrowingSaturateOdd : Saturating shift right narrow (top) + + /// svint8_t svqshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2) : "SQSHRNT Ztied.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svint16_t svqshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2) : "SQSHRNT Ztied.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svint32_t svqshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2) : "SQSHRNT Ztied.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svuint8_t svqshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2) : "UQSHRNT Ztied.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte 
count); + + /// svuint16_t svqshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2) : "UQSHRNT Ztied.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svuint32_t svqshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2) : "UQSHRNT Ztied.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); + + + /// ShiftRightArithmeticNarrowingSaturateUnsignedEven : Saturating shift right unsigned narrow (bottom) + + /// svuint8_t svqshrunb[_n_s16](svint16_t op1, uint64_t imm2) : "SQSHRUNB Zresult.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count); + + /// svuint16_t svqshrunb[_n_s32](svint32_t op1, uint64_t imm2) : "SQSHRUNB Zresult.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count); + + /// svuint32_t svqshrunb[_n_s64](svint64_t op1, uint64_t imm2) : "SQSHRUNB Zresult.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count); + + + /// ShiftRightArithmeticNarrowingSaturateUnsignedOdd : Saturating shift right unsigned narrow (top) + + /// svuint8_t svqshrunt[_n_s16](svuint8_t even, svint16_t op1, uint64_t imm2) : "SQSHRUNT Ztied.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svuint16_t svqshrunt[_n_s32](svuint16_t even, svint32_t op1, uint64_t imm2) : "SQSHRUNT Ztied.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svuint32_t svqshrunt[_n_s64](svuint32_t even, svint64_t op1, uint64_t imm2) : "SQSHRUNT Ztied.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count); + + + /// ShiftRightArithmeticRounded : Rounding shift right + + /// svint8_t svrshr[_n_s8]_m(svbool_t pg, svint8_t op1, uint64_t imm2) : "SRSHR Ztied1.B, Pg/M, Ztied1.B, #imm2" or "MOVPRFX Zresult, Zop1; SRSHR Zresult.B, Pg/M, Zresult.B, #imm2" + /// svint8_t svrshr[_n_s8]_x(svbool_t pg, svint8_t op1, uint64_t imm2) : "SRSHR Ztied1.B, Pg/M, Ztied1.B, #imm2" or "MOVPRFX Zresult, Zop1; SRSHR Zresult.B, Pg/M, Zresult.B, #imm2" + /// svint8_t svrshr[_n_s8]_z(svbool_t pg, svint8_t op1, uint64_t imm2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SRSHR Zresult.B, Pg/M, Zresult.B, #imm2" + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count); + + /// svint16_t svrshr[_n_s16]_m(svbool_t pg, svint16_t op1, uint64_t imm2) : "SRSHR Ztied1.H, Pg/M, Ztied1.H, #imm2" or "MOVPRFX Zresult, Zop1; SRSHR Zresult.H, Pg/M, Zresult.H, #imm2" + /// svint16_t svrshr[_n_s16]_x(svbool_t pg, svint16_t op1, uint64_t imm2) : "SRSHR Ztied1.H, Pg/M, Ztied1.H, #imm2" or "MOVPRFX Zresult, Zop1; SRSHR Zresult.H, Pg/M, Zresult.H, #imm2" + /// svint16_t svrshr[_n_s16]_z(svbool_t pg, svint16_t op1, uint64_t imm2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SRSHR Zresult.H, Pg/M, Zresult.H, #imm2" + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count); + + /// svint32_t svrshr[_n_s32]_m(svbool_t pg, 
svint32_t op1, uint64_t imm2) : "SRSHR Ztied1.S, Pg/M, Ztied1.S, #imm2" or "MOVPRFX Zresult, Zop1; SRSHR Zresult.S, Pg/M, Zresult.S, #imm2" + /// svint32_t svrshr[_n_s32]_x(svbool_t pg, svint32_t op1, uint64_t imm2) : "SRSHR Ztied1.S, Pg/M, Ztied1.S, #imm2" or "MOVPRFX Zresult, Zop1; SRSHR Zresult.S, Pg/M, Zresult.S, #imm2" + /// svint32_t svrshr[_n_s32]_z(svbool_t pg, svint32_t op1, uint64_t imm2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SRSHR Zresult.S, Pg/M, Zresult.S, #imm2" + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count); + + /// svint64_t svrshr[_n_s64]_m(svbool_t pg, svint64_t op1, uint64_t imm2) : "SRSHR Ztied1.D, Pg/M, Ztied1.D, #imm2" or "MOVPRFX Zresult, Zop1; SRSHR Zresult.D, Pg/M, Zresult.D, #imm2" + /// svint64_t svrshr[_n_s64]_x(svbool_t pg, svint64_t op1, uint64_t imm2) : "SRSHR Ztied1.D, Pg/M, Ztied1.D, #imm2" or "MOVPRFX Zresult, Zop1; SRSHR Zresult.D, Pg/M, Zresult.D, #imm2" + /// svint64_t svrshr[_n_s64]_z(svbool_t pg, svint64_t op1, uint64_t imm2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SRSHR Zresult.D, Pg/M, Zresult.D, #imm2" + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count); + + + /// ShiftRightArithmeticRoundedAdd : Rounding shift right and accumulate + + /// svint8_t svrsra[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) : "SRSRA Ztied1.B, Zop2.B, #imm3" or "MOVPRFX Zresult, Zop1; SRSRA Zresult.B, Zop2.B, #imm3" + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + /// svint16_t svrsra[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) : "SRSRA Ztied1.H, Zop2.H, #imm3" or "MOVPRFX Zresult, Zop1; SRSRA Zresult.H, Zop2.H, #imm3" + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + /// svint32_t svrsra[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) : "SRSRA Ztied1.S, Zop2.S, #imm3" or "MOVPRFX Zresult, Zop1; SRSRA Zresult.S, Zop2.S, #imm3" + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + /// svint64_t svrsra[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) : "SRSRA Ztied1.D, Zop2.D, #imm3" or "MOVPRFX Zresult, Zop1; SRSRA Zresult.D, Zop2.D, #imm3" + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + + /// ShiftRightArithmeticRoundedNarrowingSaturateEven : Saturating rounding shift right narrow (bottom) + + /// svint8_t svqrshrnb[_n_s16](svint16_t op1, uint64_t imm2) : "SQRSHRNB Zresult.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); + + /// svint16_t svqrshrnb[_n_s32](svint32_t op1, uint64_t imm2) : "SQRSHRNB Zresult.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); + + /// svint32_t svqrshrnb[_n_s64](svint64_t op1, uint64_t imm2) : "SQRSHRNB Zresult.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); + + + /// ShiftRightArithmeticRoundedNarrowingSaturateOdd : Saturating rounding shift right narrow (top) + + /// svint8_t svqrshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2) : "SQRSHRNT Ztied.B, Zop1.H, #imm2" + public static unsafe Vector 
ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svint16_t svqrshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2) : "SQRSHRNT Ztied.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svint32_t svqrshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2) : "SQRSHRNT Ztied.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); + + + /// ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven : Saturating rounding shift right unsigned narrow (bottom) + + /// svuint8_t svqrshrunb[_n_s16](svint16_t op1, uint64_t imm2) : "SQRSHRUNB Zresult.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count); + + /// svuint16_t svqrshrunb[_n_s32](svint32_t op1, uint64_t imm2) : "SQRSHRUNB Zresult.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count); + + /// svuint32_t svqrshrunb[_n_s64](svint64_t op1, uint64_t imm2) : "SQRSHRUNB Zresult.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count); + + + /// ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd : Saturating rounding shift right unsigned narrow (top) + + /// svuint8_t svqrshrunt[_n_s16](svuint8_t even, svint16_t op1, uint64_t imm2) : "SQRSHRUNT Ztied.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svuint16_t svqrshrunt[_n_s32](svuint16_t even, svint32_t op1, uint64_t imm2) : "SQRSHRUNT Ztied.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svuint32_t svqrshrunt[_n_s64](svuint32_t even, svint64_t op1, uint64_t imm2) : "SQRSHRUNT Ztied.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count); + + + /// ShiftRightLogicalAdd : Shift right and accumulate + + /// svuint8_t svsra[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) : "USRA Ztied1.B, Zop2.B, #imm3" or "MOVPRFX Zresult, Zop1; USRA Zresult.B, Zop2.B, #imm3" + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + /// svuint16_t svsra[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) : "USRA Ztied1.H, Zop2.H, #imm3" or "MOVPRFX Zresult, Zop1; USRA Zresult.H, Zop2.H, #imm3" + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + /// svuint32_t svsra[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) : "USRA Ztied1.S, Zop2.S, #imm3" or "MOVPRFX Zresult, Zop1; USRA Zresult.S, Zop2.S, #imm3" + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + /// svuint64_t svsra[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) : "USRA Ztied1.D, Zop2.D, #imm3" or "MOVPRFX Zresult, Zop1; USRA Zresult.D, Zop2.D, #imm3" + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, 
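[ConstantExpected] byte count);

The SSRA/USRA accumulate forms above (and their rounding counterparts SRSRA/URSRA below) fold an immediate right shift and an add into one instruction: each element of value is shifted right and added to the corresponding element of addend. A scalar sketch of the unsigned, non-rounding 8-bit case, assuming the usual ACLE semantics; the helper name is hypothetical and not part of the generated surface:

    // Illustrative sketch only: per-element model of USRA (8-bit case).
    static byte ShiftRightLogicalAddScalar(byte addend, byte value, byte count)
    {
        // Logical (zero-filling) right shift, then a plain wrapping add;
        // the accumulate does not saturate.
        int shifted = value >> Math.Min((int)count, 8);
        return (byte)(addend + shifted);
    }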
+ + + /// ShiftRightLogicalNarrowingEven : Shift right narrow (bottom) + + /// svint8_t svshrnb[_n_s16](svint16_t op1, uint64_t imm2) : "SHRNB Zresult.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count); + + /// svint16_t svshrnb[_n_s32](svint32_t op1, uint64_t imm2) : "SHRNB Zresult.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count); + + /// svint32_t svshrnb[_n_s64](svint64_t op1, uint64_t imm2) : "SHRNB Zresult.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count); + + /// svuint8_t svshrnb[_n_u16](svuint16_t op1, uint64_t imm2) : "SHRNB Zresult.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count); + + /// svuint16_t svshrnb[_n_u32](svuint32_t op1, uint64_t imm2) : "SHRNB Zresult.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count); + + /// svuint32_t svshrnb[_n_u64](svuint64_t op1, uint64_t imm2) : "SHRNB Zresult.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count); + + + /// ShiftRightLogicalNarrowingOdd : Shift right narrow (top) + + /// svint8_t svshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2) : "SHRNT Ztied.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svint16_t svshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2) : "SHRNT Ztied.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svint32_t svshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2) : "SHRNT Ztied.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svuint8_t svshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2) : "SHRNT Ztied.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svuint16_t svshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2) : "SHRNT Ztied.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svuint32_t svshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2) : "SHRNT Ztied.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); + + + /// ShiftRightLogicalRounded : Rounding shift right + + /// svuint8_t svrshr[_n_u8]_m(svbool_t pg, svuint8_t op1, uint64_t imm2) : "URSHR Ztied1.B, Pg/M, Ztied1.B, #imm2" or "MOVPRFX Zresult, Zop1; URSHR Zresult.B, Pg/M, Zresult.B, #imm2" + /// svuint8_t svrshr[_n_u8]_x(svbool_t pg, svuint8_t op1, uint64_t imm2) : "URSHR Ztied1.B, Pg/M, Ztied1.B, #imm2" or "MOVPRFX Zresult, Zop1; URSHR Zresult.B, Pg/M, Zresult.B, #imm2" + /// svuint8_t svrshr[_n_u8]_z(svbool_t pg, svuint8_t op1, uint64_t imm2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; URSHR Zresult.B, Pg/M, Zresult.B, #imm2" + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count); + + /// svuint16_t svrshr[_n_u16]_m(svbool_t pg, svuint16_t op1,
uint64_t imm2) : "URSHR Ztied1.H, Pg/M, Ztied1.H, #imm2" or "MOVPRFX Zresult, Zop1; URSHR Zresult.H, Pg/M, Zresult.H, #imm2" + /// svuint16_t svrshr[_n_u16]_x(svbool_t pg, svuint16_t op1, uint64_t imm2) : "URSHR Ztied1.H, Pg/M, Ztied1.H, #imm2" or "MOVPRFX Zresult, Zop1; URSHR Zresult.H, Pg/M, Zresult.H, #imm2" + /// svuint16_t svrshr[_n_u16]_z(svbool_t pg, svuint16_t op1, uint64_t imm2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; URSHR Zresult.H, Pg/M, Zresult.H, #imm2" + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count); + + /// svuint32_t svrshr[_n_u32]_m(svbool_t pg, svuint32_t op1, uint64_t imm2) : "URSHR Ztied1.S, Pg/M, Ztied1.S, #imm2" or "MOVPRFX Zresult, Zop1; URSHR Zresult.S, Pg/M, Zresult.S, #imm2" + /// svuint32_t svrshr[_n_u32]_x(svbool_t pg, svuint32_t op1, uint64_t imm2) : "URSHR Ztied1.S, Pg/M, Ztied1.S, #imm2" or "MOVPRFX Zresult, Zop1; URSHR Zresult.S, Pg/M, Zresult.S, #imm2" + /// svuint32_t svrshr[_n_u32]_z(svbool_t pg, svuint32_t op1, uint64_t imm2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; URSHR Zresult.S, Pg/M, Zresult.S, #imm2" + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count); + + /// svuint64_t svrshr[_n_u64]_m(svbool_t pg, svuint64_t op1, uint64_t imm2) : "URSHR Ztied1.D, Pg/M, Ztied1.D, #imm2" or "MOVPRFX Zresult, Zop1; URSHR Zresult.D, Pg/M, Zresult.D, #imm2" + /// svuint64_t svrshr[_n_u64]_x(svbool_t pg, svuint64_t op1, uint64_t imm2) : "URSHR Ztied1.D, Pg/M, Ztied1.D, #imm2" or "MOVPRFX Zresult, Zop1; URSHR Zresult.D, Pg/M, Zresult.D, #imm2" + /// svuint64_t svrshr[_n_u64]_z(svbool_t pg, svuint64_t op1, uint64_t imm2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; URSHR Zresult.D, Pg/M, Zresult.D, #imm2" + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count); + + + /// ShiftRightLogicalRoundedAdd : Rounding shift right and accumulate + + /// svuint8_t svrsra[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) : "URSRA Ztied1.B, Zop2.B, #imm3" or "MOVPRFX Zresult, Zop1; URSRA Zresult.B, Zop2.B, #imm3" + public static unsafe Vector ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + /// svuint16_t svrsra[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) : "URSRA Ztied1.H, Zop2.H, #imm3" or "MOVPRFX Zresult, Zop1; URSRA Zresult.H, Zop2.H, #imm3" + public static unsafe Vector ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + /// svuint32_t svrsra[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) : "URSRA Ztied1.S, Zop2.S, #imm3" or "MOVPRFX Zresult, Zop1; URSRA Zresult.S, Zop2.S, #imm3" + public static unsafe Vector ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + /// svuint64_t svrsra[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) : "URSRA Ztied1.D, Zop2.D, #imm3" or "MOVPRFX Zresult, Zop1; URSRA Zresult.D, Zop2.D, #imm3" + public static unsafe Vector ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count); + + + /// ShiftRightLogicalRoundedNarrowingEven : Rounding shift right narrow (bottom) + + /// svint8_t svrshrnb[_n_s16](svint16_t op1, uint64_t imm2) : "RSHRNB Zresult.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count); + + /// svint16_t svrshrnb[_n_s32](svint32_t op1, uint64_t imm2) : "RSHRNB Zresult.H, Zop1.S, #imm2" + public static unsafe Vector 
ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count); + + /// svint32_t svrshrnb[_n_s64](svint64_t op1, uint64_t imm2) : "RSHRNB Zresult.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count); + + /// svuint8_t svrshrnb[_n_u16](svuint16_t op1, uint64_t imm2) : "RSHRNB Zresult.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count); + + /// svuint16_t svrshrnb[_n_u32](svuint32_t op1, uint64_t imm2) : "RSHRNB Zresult.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count); + + /// svuint32_t svrshrnb[_n_u64](svuint64_t op1, uint64_t imm2) : "RSHRNB Zresult.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count); + + + /// ShiftRightLogicalRoundedNarrowingOdd : Rounding shift right narrow (top) + + /// svint8_t svrshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2) : "RSHRNT Ztied.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svint16_t svrshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2) : "RSHRNT Ztied.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svint32_t svrshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2) : "RSHRNT Ztied.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svuint8_t svrshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2) : "RSHRNT Ztied.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svuint16_t svrshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2) : "RSHRNT Ztied.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svuint32_t svrshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2) : "RSHRNT Ztied.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); + + + /// ShiftRightLogicalRoundedNarrowingSaturateEven : Saturating rounding shift right narrow (bottom) + + /// svuint8_t svqrshrnb[_n_u16](svuint16_t op1, uint64_t imm2) : "UQRSHRNB Zresult.B, Zop1.H, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); + + /// svuint16_t svqrshrnb[_n_u32](svuint32_t op1, uint64_t imm2) : "UQRSHRNB Zresult.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); + + /// svuint32_t svqrshrnb[_n_u64](svuint64_t op1, uint64_t imm2) : "UQRSHRNB Zresult.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); + + + /// ShiftRightLogicalRoundedNarrowingSaturateOdd : Saturating rounding shift right narrow (top) + + /// svuint8_t svqrshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2) : "UQRSHRNT Ztied.B, Zop1.H, #imm2" + public static unsafe Vector 
ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svuint16_t svqrshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2) : "UQRSHRNT Ztied.H, Zop1.S, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); + + /// svuint32_t svqrshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2) : "UQRSHRNT Ztied.S, Zop1.D, #imm2" + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); + + + /// Xor : Bitwise exclusive OR of three vectors + + /// svint8_t sveor3[_s8](svint8_t op1, svint8_t op2, svint8_t op3) : "EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D" or "EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3); + + /// svint16_t sveor3[_s16](svint16_t op1, svint16_t op2, svint16_t op3) : "EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D" or "EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3); + + /// svint32_t sveor3[_s32](svint32_t op1, svint32_t op2, svint32_t op3) : "EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D" or "EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3); + + /// svint64_t sveor3[_s64](svint64_t op1, svint64_t op2, svint64_t op3) : "EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D" or "EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3); + + /// svuint8_t sveor3[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) : "EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D" or "EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3); + + /// svuint16_t sveor3[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) : "EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D" or "EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3); + + /// svuint32_t sveor3[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) : "EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D" or "EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3); + + /// svuint64_t sveor3[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) : "EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D" or "EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D" or "EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3); 
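The three-input Xor above maps to a single EOR3 instruction, but its result is simply the elementwise exclusive OR of all three vectors; since XOR is associative and commutative, the alternative operand orderings listed in the comments all produce the same value, and the instruction exists only to fuse two chained EORs. A scalar sketch; the helper name is hypothetical and not part of the generated surface:

    // Illustrative sketch only: per-element model of EOR3.
    static ulong XorScalar(ulong value1, ulong value2, ulong value3)
        => value1 ^ value2 ^ value3;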
+ + + /// XorRotateRight : Bitwise exclusive OR and rotate right + + /// svint8_t svxar[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) : "XAR Ztied1.B, Ztied1.B, Zop2.B, #imm3" or "XAR Ztied2.B, Ztied2.B, Zop1.B, #imm3" or "MOVPRFX Zresult, Zop1; XAR Zresult.B, Zresult.B, Zop2.B, #imm3" + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count); + + /// svint16_t svxar[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) : "XAR Ztied1.H, Ztied1.H, Zop2.H, #imm3" or "XAR Ztied2.H, Ztied2.H, Zop1.H, #imm3" or "MOVPRFX Zresult, Zop1; XAR Zresult.H, Zresult.H, Zop2.H, #imm3" + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count); + + /// svint32_t svxar[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) : "XAR Ztied1.S, Ztied1.S, Zop2.S, #imm3" or "XAR Ztied2.S, Ztied2.S, Zop1.S, #imm3" or "MOVPRFX Zresult, Zop1; XAR Zresult.S, Zresult.S, Zop2.S, #imm3" + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count); + + /// svint64_t svxar[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) : "XAR Ztied1.D, Ztied1.D, Zop2.D, #imm3" or "XAR Ztied2.D, Ztied2.D, Zop1.D, #imm3" or "MOVPRFX Zresult, Zop1; XAR Zresult.D, Zresult.D, Zop2.D, #imm3" + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count); + + /// svuint8_t svxar[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) : "XAR Ztied1.B, Ztied1.B, Zop2.B, #imm3" or "XAR Ztied2.B, Ztied2.B, Zop1.B, #imm3" or "MOVPRFX Zresult, Zop1; XAR Zresult.B, Zresult.B, Zop2.B, #imm3" + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count); + + /// svuint16_t svxar[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) : "XAR Ztied1.H, Ztied1.H, Zop2.H, #imm3" or "XAR Ztied2.H, Ztied2.H, Zop1.H, #imm3" or "MOVPRFX Zresult, Zop1; XAR Zresult.H, Zresult.H, Zop2.H, #imm3" + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count); + + /// svuint32_t svxar[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) : "XAR Ztied1.S, Ztied1.S, Zop2.S, #imm3" or "XAR Ztied2.S, Ztied2.S, Zop1.S, #imm3" or "MOVPRFX Zresult, Zop1; XAR Zresult.S, Zresult.S, Zop2.S, #imm3" + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count); + + /// svuint64_t svxar[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) : "XAR Ztied1.D, Ztied1.D, Zop2.D, #imm3" or "XAR Ztied2.D, Ztied2.D, Zop1.D, #imm3" or "MOVPRFX Zresult, Zop1; XAR Zresult.D, Zresult.D, Zop2.D, #imm3" + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count); + + + /// total method signatures: 188 + /// total method names: 38 +} + + + /// Rejected: + /// public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, sbyte mask); // svbcax[_n_s8] + /// public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, short mask); // svbcax[_n_s16] + /// public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, int mask); // svbcax[_n_s32] + /// public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, long mask); // svbcax[_n_s64] + /// public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, byte mask); // svbcax[_n_u8] + /// public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, ushort mask); // svbcax[_n_u16] + /// public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, uint 
mask); // svbcax[_n_u32] + /// public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, ulong mask); // svbcax[_n_u64] + /// public static unsafe Vector BitwiseSelect(Vector select, Vector left, sbyte right); // svbsl[_n_s8] + /// public static unsafe Vector BitwiseSelect(Vector select, Vector left, short right); // svbsl[_n_s16] + /// public static unsafe Vector BitwiseSelect(Vector select, Vector left, int right); // svbsl[_n_s32] + /// public static unsafe Vector BitwiseSelect(Vector select, Vector left, long right); // svbsl[_n_s64] + /// public static unsafe Vector BitwiseSelect(Vector select, Vector left, byte right); // svbsl[_n_u8] + /// public static unsafe Vector BitwiseSelect(Vector select, Vector left, ushort right); // svbsl[_n_u16] + /// public static unsafe Vector BitwiseSelect(Vector select, Vector left, uint right); // svbsl[_n_u32] + /// public static unsafe Vector BitwiseSelect(Vector select, Vector left, ulong right); // svbsl[_n_u64] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, Vector right); // svnbsl[_s8] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, Vector right); // svnbsl[_s16] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, Vector right); // svnbsl[_s32] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, Vector right); // svnbsl[_s64] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, Vector right); // svnbsl[_u8] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, Vector right); // svnbsl[_u16] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, Vector right); // svnbsl[_u32] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, Vector right); // svnbsl[_u64] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, sbyte right); // svnbsl[_n_s8] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, short right); // svnbsl[_n_s16] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, int right); // svnbsl[_n_s32] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, long right); // svnbsl[_n_s64] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, byte right); // svnbsl[_n_u8] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, ushort right); // svnbsl[_n_u16] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, uint right); // svnbsl[_n_u32] + /// public static unsafe Vector BitwiseSelectInverted(Vector select, Vector left, ulong right); // svnbsl[_n_u64] + /// public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, sbyte right); // svbsl1n[_n_s8] + /// public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, short right); // svbsl1n[_n_s16] + /// public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, int right); // svbsl1n[_n_s32] + /// public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, long right); // svbsl1n[_n_s64] + /// public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, byte right); // svbsl1n[_n_u8] + /// public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, ushort right); // svbsl1n[_n_u16] + /// 
public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, uint right); // svbsl1n[_n_u32] + /// public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, ulong right); // svbsl1n[_n_u64] + /// public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, sbyte right); // svbsl2n[_n_s8] + /// public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, short right); // svbsl2n[_n_s16] + /// public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, int right); // svbsl2n[_n_s32] + /// public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, long right); // svbsl2n[_n_s64] + /// public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, byte right); // svbsl2n[_n_u8] + /// public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, ushort right); // svbsl2n[_n_u16] + /// public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, uint right); // svbsl2n[_n_u32] + /// public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, ulong right); // svbsl2n[_n_u64] + /// public static unsafe Vector ShiftArithmeticSaturate(Vector value, sbyte count); // svqshl[_n_s8]_m or svqshl[_n_s8]_x or svqshl[_n_s8]_z + /// public static unsafe Vector ShiftArithmeticSaturate(Vector value, short count); // svqshl[_n_s16]_m or svqshl[_n_s16]_x or svqshl[_n_s16]_z + /// public static unsafe Vector ShiftArithmeticSaturate(Vector value, int count); // svqshl[_n_s32]_m or svqshl[_n_s32]_x or svqshl[_n_s32]_z + /// public static unsafe Vector ShiftArithmeticSaturate(Vector value, long count); // svqshl[_n_s64]_m or svqshl[_n_s64]_x or svqshl[_n_s64]_z + /// public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, sbyte count); // svqshl[_n_u8]_m or svqshl[_n_u8]_x or svqshl[_n_u8]_z + /// public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, short count); // svqshl[_n_u16]_m or svqshl[_n_u16]_x or svqshl[_n_u16]_z + /// public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, int count); // svqshl[_n_u32]_m or svqshl[_n_u32]_x or svqshl[_n_u32]_z + /// public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, long count); // svqshl[_n_u64]_m or svqshl[_n_u64]_x or svqshl[_n_u64]_z + /// public static unsafe Vector ShiftLogicalRounded(Vector value, sbyte count); // svrshl[_n_s8]_m or svrshl[_n_s8]_x or svrshl[_n_s8]_z + /// public static unsafe Vector ShiftLogicalRounded(Vector value, short count); // svrshl[_n_s16]_m or svrshl[_n_s16]_x or svrshl[_n_s16]_z + /// public static unsafe Vector ShiftLogicalRounded(Vector value, int count); // svrshl[_n_s32]_m or svrshl[_n_s32]_x or svrshl[_n_s32]_z + /// public static unsafe Vector ShiftLogicalRounded(Vector value, long count); // svrshl[_n_s64]_m or svrshl[_n_s64]_x or svrshl[_n_s64]_z + /// public static unsafe Vector ShiftLogicalRounded(Vector value, sbyte count); // svrshl[_n_u8]_m or svrshl[_n_u8]_x or svrshl[_n_u8]_z + /// public static unsafe Vector ShiftLogicalRounded(Vector value, short count); // svrshl[_n_u16]_m or svrshl[_n_u16]_x or svrshl[_n_u16]_z + /// public static unsafe Vector ShiftLogicalRounded(Vector value, int count); // svrshl[_n_u32]_m or svrshl[_n_u32]_x or svrshl[_n_u32]_z + /// public static unsafe Vector ShiftLogicalRounded(Vector value, long count); // svrshl[_n_u64]_m or svrshl[_n_u64]_x or svrshl[_n_u64]_z + /// public static unsafe Vector 
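ShiftLogicalRoundedSaturate(Vector value, sbyte count); // svqrshl[_n_s8]_m or svqrshl[_n_s8]_x or svqrshl[_n_s8]_z + /// public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, short count); // svqrshl[_n_s16]_m or svqrshl[_n_s16]_x or svqrshl[_n_s16]_z + /// public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, int count); // svqrshl[_n_s32]_m or svqrshl[_n_s32]_x or svqrshl[_n_s32]_z + /// public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, long count); // svqrshl[_n_s64]_m or svqrshl[_n_s64]_x or svqrshl[_n_s64]_z + /// public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, sbyte count); // svqrshl[_n_u8]_m or svqrshl[_n_u8]_x or svqrshl[_n_u8]_z + /// public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, short count); // svqrshl[_n_u16]_m or svqrshl[_n_u16]_x or svqrshl[_n_u16]_z + /// public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, int count); // svqrshl[_n_u32]_m or svqrshl[_n_u32]_x or svqrshl[_n_u32]_z + /// public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, long count); // svqrshl[_n_u64]_m or svqrshl[_n_u64]_x or svqrshl[_n_u64]_z + /// public static unsafe Vector Xor(Vector value1, Vector value2, sbyte value3); // sveor3[_n_s8] + /// public static unsafe Vector Xor(Vector value1, Vector value2, short value3); // sveor3[_n_s16] + /// public static unsafe Vector Xor(Vector value1, Vector value2, int value3); // sveor3[_n_s32] + /// public static unsafe Vector Xor(Vector value1, Vector value2, long value3); // sveor3[_n_s64] + /// public static unsafe Vector Xor(Vector value1, Vector value2, byte value3); // sveor3[_n_u8] + /// public static unsafe Vector Xor(Vector value1, Vector value2, ushort value3); // sveor3[_n_u16] + /// public static unsafe Vector Xor(Vector value1, Vector value2, uint value3); // sveor3[_n_u32] + /// public static unsafe Vector Xor(Vector value1, Vector value2, ulong value3); // sveor3[_n_u64] + /// Total Rejected: 80 + + /// Total ACLE covered across API: 388 +
+ // Editorial note (illustrative, not generated output): the '_n' scalar overloads above
+ // are rejected because each composes from an accepted vector overload plus a broadcast.
+ // A minimal sketch, assuming the proposed vector Sve2.Xor (EOR3) surface; the helper
+ // name is hypothetical:
+ // static Vector<byte> Eor3WithScalar(Vector<byte> a, Vector<byte> b, byte c)
+ //     => Sve2.Xor(a, b, new Vector<byte>(c)); // splat c with the Vector<T> ctor, then EOR3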
diff --git a/sve_api/out_api/apiraw_FEAT_SVE2__counting.cs b/sve_api/out_api/apiraw_FEAT_SVE2__counting.cs new file mode 100644 index 0000000000000..202f73d93b197 --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE2__counting.cs @@ -0,0 +1,55 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: counting +{ + + /// T: [uint, int], [ulong, long] + public static unsafe Vector CountMatchingElements(Vector mask, Vector left, Vector right); // HISTCNT + + /// T: uint, ulong + public static unsafe Vector CountMatchingElements(Vector mask, Vector left, Vector right); // HISTCNT + + public static unsafe Vector CountMatchingElementsIn128BitSegments(Vector left, Vector right); // HISTSEG + + public static unsafe Vector CountMatchingElementsIn128BitSegments(Vector left, Vector right); // HISTSEG + + /// total method signatures: 4 + +} + + +/// Full API +public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: counting +{ + /// CountMatchingElements : Count matching elements + + /// svuint32_t svhistcnt[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "HISTCNT Zresult.S, Pg/Z, Zop1.S, Zop2.S" + public static unsafe Vector CountMatchingElements(Vector mask, Vector left, Vector right); + + /// svuint64_t svhistcnt[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "HISTCNT Zresult.D, Pg/Z, Zop1.D, Zop2.D" + public static unsafe Vector CountMatchingElements(Vector mask, Vector
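left, Vector right); + + /// svuint32_t svhistcnt[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "HISTCNT Zresult.S, Pg/Z, Zop1.S, Zop2.S" + public static unsafe Vector CountMatchingElements(Vector mask, Vector left, Vector right); + + /// svuint64_t svhistcnt[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "HISTCNT Zresult.D, Pg/Z, Zop1.D, Zop2.D" + public static unsafe Vector CountMatchingElements(Vector mask, Vector left, Vector right); + + + /// CountMatchingElementsIn128BitSegments : Count matching elements in 128-bit segments + + /// svuint8_t svhistseg[_s8](svint8_t op1, svint8_t op2) : "HISTSEG Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector CountMatchingElementsIn128BitSegments(Vector left, Vector right); + + /// svuint8_t svhistseg[_u8](svuint8_t op1, svuint8_t op2) : "HISTSEG Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector CountMatchingElementsIn128BitSegments(Vector left, Vector right); + + + /// total method signatures: 6 + /// total method names: 2 +} + + + /// Total ACLE covered across API: 6 +
+ // Editorial sketch (not generated output): a hedged use of the proposed counting API.
+ // HISTCNT returns, for each active lane of 'left', a count of equal elements in 'right'
+ // (the exact lane-ordering rule is per the ACLE svhistcnt description), the building
+ // block for vectorised histogram updates. Variable names are assumptions:
+ // Vector<uint> counts = Sve2.CountMatchingElements(mask, keys, keys);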
diff --git a/sve_api/out_api/apiraw_FEAT_SVE2__fp.cs b/sve_api/out_api/apiraw_FEAT_SVE2__fp.cs new file mode 100644 index 0000000000000..0704a43e3b279 --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE2__fp.cs @@ -0,0 +1,217 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: fp +{ + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); // CADD // MOVPRFX + + public static unsafe Vector DownConvertNarrowingUpper(Vector value); // FCVTNT // predicated + + public static unsafe Vector DownConvertRoundingOdd(Vector value); // FCVTX // predicated, MOVPRFX + + public static unsafe Vector DownConvertRoundingOddUpper(Vector value); // FCVTXNT // predicated + + /// T: [int, float], [long, double] + public static unsafe Vector Log2(Vector value); // FLOGB // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); // CMLA // MOVPRFX + + /// T: short, int, ushort, uint + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation); // CMLA // MOVPRFX + + public static unsafe Vector ReciprocalEstimate(Vector value); // URECPE // predicated, MOVPRFX + + public static unsafe Vector ReciprocalSqrtEstimate(Vector value); // URSQRTE // predicated, MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector SaturatingComplexAddRotate(Vector op1, Vector op2, [ConstantExpected] byte rotation); // SQCADD // MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation); // SQRDCMLAH // MOVPRFX + + /// T: short, int + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation); // SQRDCMLAH // MOVPRFX + + public static unsafe Vector UpConvertWideningUpper(Vector value); // FCVTLT // predicated + + /// total method signatures: 13 + +} + + +/// Full API +public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: fp +{ + /// 
AddRotateComplex : Complex add with rotate + + /// svint8_t svcadd[_s8](svint8_t op1, svint8_t op2, uint64_t imm_rotation) : "CADD Ztied1.B, Ztied1.B, Zop2.B, #imm_rotation" or "MOVPRFX Zresult, Zop1; CADD Zresult.B, Zresult.B, Zop2.B, #imm_rotation" + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svint16_t svcadd[_s16](svint16_t op1, svint16_t op2, uint64_t imm_rotation) : "CADD Ztied1.H, Ztied1.H, Zop2.H, #imm_rotation" or "MOVPRFX Zresult, Zop1; CADD Zresult.H, Zresult.H, Zop2.H, #imm_rotation" + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svint32_t svcadd[_s32](svint32_t op1, svint32_t op2, uint64_t imm_rotation) : "CADD Ztied1.S, Ztied1.S, Zop2.S, #imm_rotation" or "MOVPRFX Zresult, Zop1; CADD Zresult.S, Zresult.S, Zop2.S, #imm_rotation" + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svint64_t svcadd[_s64](svint64_t op1, svint64_t op2, uint64_t imm_rotation) : "CADD Ztied1.D, Ztied1.D, Zop2.D, #imm_rotation" or "MOVPRFX Zresult, Zop1; CADD Zresult.D, Zresult.D, Zop2.D, #imm_rotation" + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svuint8_t svcadd[_u8](svuint8_t op1, svuint8_t op2, uint64_t imm_rotation) : "CADD Ztied1.B, Ztied1.B, Zop2.B, #imm_rotation" or "MOVPRFX Zresult, Zop1; CADD Zresult.B, Zresult.B, Zop2.B, #imm_rotation" + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svuint16_t svcadd[_u16](svuint16_t op1, svuint16_t op2, uint64_t imm_rotation) : "CADD Ztied1.H, Ztied1.H, Zop2.H, #imm_rotation" or "MOVPRFX Zresult, Zop1; CADD Zresult.H, Zresult.H, Zop2.H, #imm_rotation" + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svuint32_t svcadd[_u32](svuint32_t op1, svuint32_t op2, uint64_t imm_rotation) : "CADD Ztied1.S, Ztied1.S, Zop2.S, #imm_rotation" or "MOVPRFX Zresult, Zop1; CADD Zresult.S, Zresult.S, Zop2.S, #imm_rotation" + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svuint64_t svcadd[_u64](svuint64_t op1, svuint64_t op2, uint64_t imm_rotation) : "CADD Ztied1.D, Ztied1.D, Zop2.D, #imm_rotation" or "MOVPRFX Zresult, Zop1; CADD Zresult.D, Zresult.D, Zop2.D, #imm_rotation" + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); + + + /// DownConvertNarrowingUpper : Down convert and narrow (top) + + /// svfloat32_t svcvtnt_f32[_f64]_m(svfloat32_t even, svbool_t pg, svfloat64_t op) : "FCVTNT Ztied.S, Pg/M, Zop.D" + /// svfloat32_t svcvtnt_f32[_f64]_x(svfloat32_t even, svbool_t pg, svfloat64_t op) : "FCVTNT Ztied.S, Pg/M, Zop.D" + public static unsafe Vector DownConvertNarrowingUpper(Vector value); + + + /// DownConvertRoundingOdd : Down convert, rounding to odd + + /// svfloat32_t svcvtx_f32[_f64]_m(svfloat32_t inactive, svbool_t pg, svfloat64_t op) : "FCVTX Ztied.S, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FCVTX Zresult.S, Pg/M, Zop.D" + /// svfloat32_t svcvtx_f32[_f64]_x(svbool_t pg, svfloat64_t op) : "FCVTX Ztied.S, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FCVTX Zresult.S, Pg/M, Zop.D" + /// svfloat32_t svcvtx_f32[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTX Zresult.S, Pg/M, Zop.D" + public static unsafe Vector 
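DownConvertRoundingOdd(Vector value); + + +
+ // Editorial note (not generated output): FCVTX rounds to odd, which makes a two-step
+ // narrowing immune to double rounding: convert double to float with round-to-odd first,
+ // then round that float to the final narrower format with round-to-nearest, and the
+ // result matches a single correctly rounded conversion, because round-to-odd preserves
+ // the sticky information the second rounding needs. A hypothetical composition:
+ // Vector<float> mid = Sve2.DownConvertRoundingOdd(doubles); // FCVTX, round-to-odd
+ // followed by an ordinary float-to-half narrowing of 'mid'.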
/// DownConvertRoundingOddUpper : Down convert, rounding to odd (top) + + /// svfloat32_t svcvtxnt_f32[_f64]_m(svfloat32_t even, svbool_t pg, svfloat64_t op) : "FCVTXNT Ztied.S, Pg/M, Zop.D" + /// svfloat32_t svcvtxnt_f32[_f64]_x(svfloat32_t even, svbool_t pg, svfloat64_t op) : "FCVTXNT Ztied.S, Pg/M, Zop.D" + public static unsafe Vector DownConvertRoundingOddUpper(Vector value); + + + /// Log2 : Base 2 logarithm as integer + + /// svint32_t svlogb[_f32]_m(svint32_t inactive, svbool_t pg, svfloat32_t op) : "FLOGB Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FLOGB Zresult.S, Pg/M, Zop.S" + /// svint32_t svlogb[_f32]_x(svbool_t pg, svfloat32_t op) : "FLOGB Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FLOGB Zresult.S, Pg/M, Zop.S" + /// svint32_t svlogb[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FLOGB Zresult.S, Pg/M, Zop.S" + public static unsafe Vector Log2(Vector value); + + /// svint64_t svlogb[_f64]_m(svint64_t inactive, svbool_t pg, svfloat64_t op) : "FLOGB Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FLOGB Zresult.D, Pg/M, Zop.D" + /// svint64_t svlogb[_f64]_x(svbool_t pg, svfloat64_t op) : "FLOGB Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FLOGB Zresult.D, Pg/M, Zop.D" + /// svint64_t svlogb[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FLOGB Zresult.D, Pg/M, Zop.D" + public static unsafe Vector Log2(Vector value); + + + /// MultiplyAddRotateComplex : Complex multiply-add with rotate + + /// svint8_t svcmla[_s8](svint8_t op1, svint8_t op2, svint8_t op3, uint64_t imm_rotation) : "CMLA Ztied1.B, Zop2.B, Zop3.B, #imm_rotation" or "MOVPRFX Zresult, Zop1; CMLA Zresult.B, Zop2.B, Zop3.B, #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svint16_t svcmla[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_rotation) : "CMLA Ztied1.H, Zop2.H, Zop3.H, #imm_rotation" or "MOVPRFX Zresult, Zop1; CMLA Zresult.H, Zop2.H, Zop3.H, #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svint32_t svcmla[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_rotation) : "CMLA Ztied1.S, Zop2.S, Zop3.S, #imm_rotation" or "MOVPRFX Zresult, Zop1; CMLA Zresult.S, Zop2.S, Zop3.S, #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svint64_t svcmla[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_rotation) : "CMLA Ztied1.D, Zop2.D, Zop3.D, #imm_rotation" or "MOVPRFX Zresult, Zop1; CMLA Zresult.D, Zop2.D, Zop3.D, #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svuint8_t svcmla[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3, uint64_t imm_rotation) : "CMLA Ztied1.B, Zop2.B, Zop3.B, #imm_rotation" or "MOVPRFX Zresult, Zop1; CMLA Zresult.B, Zop2.B, Zop3.B, #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svuint16_t svcmla[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_rotation) : "CMLA Ztied1.H, Zop2.H, Zop3.H, #imm_rotation" or "MOVPRFX Zresult, Zop1; CMLA Zresult.H, Zop2.H, Zop3.H, #imm_rotation" + public static unsafe 
Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svuint32_t svcmla[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_rotation) : "CMLA Ztied1.S, Zop2.S, Zop3.S, #imm_rotation" or "MOVPRFX Zresult, Zop1; CMLA Zresult.S, Zop2.S, Zop3.S, #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svuint64_t svcmla[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3, uint64_t imm_rotation) : "CMLA Ztied1.D, Zop2.D, Zop3.D, #imm_rotation" or "MOVPRFX Zresult, Zop1; CMLA Zresult.D, Zop2.D, Zop3.D, #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); + + + /// MultiplyAddRotateComplexBySelectedScalar : Complex multiply-add with rotate + + /// svint16_t svcmla_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index, uint64_t imm_rotation) : "CMLA Ztied1.H, Zop2.H, Zop3.H[imm_index], #imm_rotation" or "MOVPRFX Zresult, Zop1; CMLA Zresult.H, Zop2.H, Zop3.H[imm_index], #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation); + + /// svint32_t svcmla_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index, uint64_t imm_rotation) : "CMLA Ztied1.S, Zop2.S, Zop3.S[imm_index], #imm_rotation" or "MOVPRFX Zresult, Zop1; CMLA Zresult.S, Zop2.S, Zop3.S[imm_index], #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation); + + /// svuint16_t svcmla_lane[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index, uint64_t imm_rotation) : "CMLA Ztied1.H, Zop2.H, Zop3.H[imm_index], #imm_rotation" or "MOVPRFX Zresult, Zop1; CMLA Zresult.H, Zop2.H, Zop3.H[imm_index], #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation); + + /// svuint32_t svcmla_lane[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index, uint64_t imm_rotation) : "CMLA Ztied1.S, Zop2.S, Zop3.S[imm_index], #imm_rotation" or "MOVPRFX Zresult, Zop1; CMLA Zresult.S, Zop2.S, Zop3.S[imm_index], #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation); + + + /// ReciprocalEstimate : Reciprocal estimate + + /// svuint32_t svrecpe[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) : "URECPE Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; URECPE Zresult.S, Pg/M, Zop.S" + /// svuint32_t svrecpe[_u32]_x(svbool_t pg, svuint32_t op) : "URECPE Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; URECPE Zresult.S, Pg/M, Zop.S" + /// svuint32_t svrecpe[_u32]_z(svbool_t pg, svuint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; URECPE Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ReciprocalEstimate(Vector value); + + + /// ReciprocalSqrtEstimate : Reciprocal square root estimate + + /// svuint32_t svrsqrte[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) : "URSQRTE Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; URSQRTE Zresult.S, Pg/M, Zop.S" + 
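/// svuint32_t svrsqrte[_u32]_x(svbool_t pg, svuint32_t op) : "URSQRTE Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; URSQRTE Zresult.S, Pg/M, Zop.S" + /// svuint32_t svrsqrte[_u32]_z(svbool_t pg, svuint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; URSQRTE Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ReciprocalSqrtEstimate(Vector value); + + +
+ // Editorial note (not generated output): for the complex-rotate operations below, the
+ // ACLE accepts rotations in degrees: 90 or 270 for the add forms (svcadd/svqcadd) and
+ // 0, 90, 180 or 270 for the multiply-add forms (svcmla/svqrdcmlah), with even/odd lane
+ // pairs holding the real and imaginary parts. Whether the proposed byte 'rotation'
+ // parameter carries the degree value or a 0..3 encoding is left open in this draft;
+ // degrees are assumed in this hypothetical call:
+ // Vector<int> r = Sve2.SaturatingComplexAddRotate(a, b, 90); // SQCADD ..., #90
+ // Note also that the SQRDCMLAH lane overload passes imm_index as ulong where sibling
+ // lane APIs use [ConstantExpected] byte; that looks like a generator inconsistency.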
/// SaturatingComplexAddRotate : Saturating complex add with rotate + + /// svint8_t svqcadd[_s8](svint8_t op1, svint8_t op2, uint64_t imm_rotation) : "SQCADD Ztied1.B, Ztied1.B, Zop2.B, #imm_rotation" or "MOVPRFX Zresult, Zop1; SQCADD Zresult.B, Zresult.B, Zop2.B, #imm_rotation" + public static unsafe Vector SaturatingComplexAddRotate(Vector op1, Vector op2, [ConstantExpected] byte rotation); + + /// svint16_t svqcadd[_s16](svint16_t op1, svint16_t op2, uint64_t imm_rotation) : "SQCADD Ztied1.H, Ztied1.H, Zop2.H, #imm_rotation" or "MOVPRFX Zresult, Zop1; SQCADD Zresult.H, Zresult.H, Zop2.H, #imm_rotation" + public static unsafe Vector SaturatingComplexAddRotate(Vector op1, Vector op2, [ConstantExpected] byte rotation); + + /// svint32_t svqcadd[_s32](svint32_t op1, svint32_t op2, uint64_t imm_rotation) : "SQCADD Ztied1.S, Ztied1.S, Zop2.S, #imm_rotation" or "MOVPRFX Zresult, Zop1; SQCADD Zresult.S, Zresult.S, Zop2.S, #imm_rotation" + public static unsafe Vector SaturatingComplexAddRotate(Vector op1, Vector op2, [ConstantExpected] byte rotation); + + /// svint64_t svqcadd[_s64](svint64_t op1, svint64_t op2, uint64_t imm_rotation) : "SQCADD Ztied1.D, Ztied1.D, Zop2.D, #imm_rotation" or "MOVPRFX Zresult, Zop1; SQCADD Zresult.D, Zresult.D, Zop2.D, #imm_rotation" + public static unsafe Vector SaturatingComplexAddRotate(Vector op1, Vector op2, [ConstantExpected] byte rotation); + + + /// SaturatingRoundingDoublingComplexMultiplyAddHighRotate : Saturating rounding doubling complex multiply-add high with rotate + + /// svint8_t svqrdcmlah[_s8](svint8_t op1, svint8_t op2, svint8_t op3, uint64_t imm_rotation) : "SQRDCMLAH Ztied1.B, Zop2.B, Zop3.B, #imm_rotation" or "MOVPRFX Zresult, Zop1; SQRDCMLAH Zresult.B, Zop2.B, Zop3.B, #imm_rotation" + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation); + + /// svint16_t svqrdcmlah[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_rotation) : "SQRDCMLAH Ztied1.H, Zop2.H, Zop3.H, #imm_rotation" or "MOVPRFX Zresult, Zop1; SQRDCMLAH Zresult.H, Zop2.H, Zop3.H, #imm_rotation" + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation); + + /// svint32_t svqrdcmlah[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_rotation) : "SQRDCMLAH Ztied1.S, Zop2.S, Zop3.S, #imm_rotation" or "MOVPRFX Zresult, Zop1; SQRDCMLAH Zresult.S, Zop2.S, Zop3.S, #imm_rotation" + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation); + + /// svint64_t svqrdcmlah[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_rotation) : "SQRDCMLAH Ztied1.D, Zop2.D, Zop3.D, #imm_rotation" or "MOVPRFX Zresult, Zop1; SQRDCMLAH Zresult.D, Zop2.D, Zop3.D, #imm_rotation" + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation); + + /// svint16_t svqrdcmlah_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index, 
uint64_t imm_rotation) : "SQRDCMLAH Ztied1.H, Zop2.H, Zop3.H[imm_index], #imm_rotation" or "MOVPRFX Zresult, Zop1; SQRDCMLAH Zresult.H, Zop2.H, Zop3.H[imm_index], #imm_rotation" + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation); + + /// svint32_t svqrdcmlah_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index, uint64_t imm_rotation) : "SQRDCMLAH Ztied1.S, Zop2.S, Zop3.S[imm_index], #imm_rotation" or "MOVPRFX Zresult, Zop1; SQRDCMLAH Zresult.S, Zop2.S, Zop3.S[imm_index], #imm_rotation" + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation); + + + /// UpConvertWideningUpper : Up convert long (top) + + /// svfloat64_t svcvtlt_f64[_f32]_m(svfloat64_t inactive, svbool_t pg, svfloat32_t op) : "FCVTLT Ztied.D, Pg/M, Zop.S" + /// svfloat64_t svcvtlt_f64[_f32]_x(svbool_t pg, svfloat32_t op) : "FCVTLT Ztied.D, Pg/M, Ztied.S" + public static unsafe Vector UpConvertWideningUpper(Vector value); + + + /// total method signatures: 38 + /// total method names: 12 +} + + + /// Total ACLE covered across API: 51 + diff --git a/sve_api/out_api/apiraw_FEAT_SVE2__gatherloads.cs b/sve_api/out_api/apiraw_FEAT_SVE2__gatherloads.cs new file mode 100644 index 0000000000000..63aca6d1eb7c2 --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE2__gatherloads.cs @@ -0,0 +1,502 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: gatherloads +{ + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses); // LDNT1B + + /// T: [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets); // LDNT1B + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, Vector addresses); // LDNT1SH + + /// T: [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, short* address, Vector indices); // LDNT1SH + + /// T: [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, short* address, Vector offsets); // LDNT1SH + + /// T: [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses); // LDNT1SW + + /// T: [long, long], [int, int], [ulong, long], [uint, int], [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices); // LDNT1SW + + /// T: [long, long], [int, int], [ulong, long], [uint, int], [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets); // LDNT1SW + + /// T: [float, uint], [int, uint], [double, ulong], [long, ulong] + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses); // LDNT1W or 
LDNT1D + + /// T: uint, ulong + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses); // LDNT1W or LDNT1D + + /// T: [float, uint], [int, uint], [double, long], [ulong, long], [double, ulong], [long, ulong] + public static unsafe Vector GatherVectorNonTemporal(Vector mask, T* address, Vector offsets); // LDNT1W or LDNT1D + + /// T: uint, long, ulong + public static unsafe Vector GatherVectorNonTemporal(Vector mask, T* address, Vector offsets); // LDNT1W or LDNT1D + + /// T: [double, long], [ulong, long], [double, ulong], [long, ulong] + public static unsafe Vector GatherVectorNonTemporal(Vector mask, T* address, Vector indices); // LDNT1D + + /// T: long, ulong + public static unsafe Vector GatherVectorNonTemporal(Vector mask, T* address, Vector indices); // LDNT1D + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses); // LDNT1SB + + /// T: [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets); // LDNT1SB + + /// T: [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets); // LDNT1H + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses); // LDNT1H + + /// T: [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, ushort* address, Vector indices); // LDNT1H + + /// T: [long, long], [int, int], [ulong, long], [uint, int], [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets); // LDNT1W + + /// T: [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses); // LDNT1W + + /// T: [long, long], [int, int], [ulong, long], [uint, int], [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices); // LDNT1W + + /// total method signatures: 22 + +} + + +/// Full API +public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: gatherloads +{ + /// GatherVectorByteZeroExtendNonTemporal : Load 8-bit data and zero-extend, non-temporal + + /// svint32_t svldnt1ub_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LDNT1B Zresult.S, Pg/Z, [Zbases.S, XZR]" + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses); + + /// svuint32_t svldnt1ub_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LDNT1B Zresult.S, Pg/Z, [Zbases.S, XZR]" + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses); + + /// svint64_t svldnt1ub_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDNT1B Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses); + + /// svuint64_t svldnt1ub_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDNT1B Zresult.D, Pg/Z, [Zbases.D, 
XZR]" + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses); + + /// svint32_t svldnt1ub_gather_[u32]offset_s32(svbool_t pg, const uint8_t *base, svuint32_t offsets) : "LDNT1B Zresult.S, Pg/Z, [Zoffsets.S, Xbase]" + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets); + + /// svuint32_t svldnt1ub_gather_[u32]offset_u32(svbool_t pg, const uint8_t *base, svuint32_t offsets) : "LDNT1B Zresult.S, Pg/Z, [Zoffsets.S, Xbase]" + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets); + + /// svint64_t svldnt1ub_gather_[s64]offset_s64(svbool_t pg, const uint8_t *base, svint64_t offsets) : "LDNT1B Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets); + + /// svuint64_t svldnt1ub_gather_[s64]offset_u64(svbool_t pg, const uint8_t *base, svint64_t offsets) : "LDNT1B Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets); + + /// svint64_t svldnt1ub_gather_[u64]offset_s64(svbool_t pg, const uint8_t *base, svuint64_t offsets) : "LDNT1B Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets); + + /// svuint64_t svldnt1ub_gather_[u64]offset_u64(svbool_t pg, const uint8_t *base, svuint64_t offsets) : "LDNT1B Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets); + + + /// GatherVectorInt16SignExtendNonTemporal : Load 16-bit data and sign-extend, non-temporal + + /// svint32_t svldnt1sh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LDNT1SH Zresult.S, Pg/Z, [Zbases.S, XZR]" + public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, Vector addresses); + + /// svuint32_t svldnt1sh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LDNT1SH Zresult.S, Pg/Z, [Zbases.S, XZR]" + public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, Vector addresses); + + /// svint64_t svldnt1sh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDNT1SH Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, Vector addresses); + + /// svuint64_t svldnt1sh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDNT1SH Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, Vector addresses); + + /// svint64_t svldnt1sh_gather_[s64]index_s64(svbool_t pg, const int16_t *base, svint64_t indices) : "LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, short* address, Vector indices); + + /// svuint64_t svldnt1sh_gather_[s64]index_u64(svbool_t pg, const int16_t *base, svint64_t indices) : "LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, short* address, Vector indices); + + /// svint64_t svldnt1sh_gather_[u64]index_s64(svbool_t pg, const int16_t *base, svuint64_t indices) : "LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, short* address, Vector indices); + + /// svuint64_t 
svldnt1sh_gather_[u64]index_u64(svbool_t pg, const int16_t *base, svuint64_t indices) : "LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, short* address, Vector indices); + + + /// GatherVectorInt16WithByteOffsetsSignExtendNonTemporal : Load 16-bit data and sign-extend, non-temporal + + /// svint32_t svldnt1sh_gather_[u32]offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets) : "LDNT1SH Zresult.S, Pg/Z, [Zoffsets.S, Xbase]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, short* address, Vector offsets); + + /// svuint32_t svldnt1sh_gather_[u32]offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets) : "LDNT1SH Zresult.S, Pg/Z, [Zoffsets.S, Xbase]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, short* address, Vector offsets); + + /// svint64_t svldnt1sh_gather_[s64]offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets) : "LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, short* address, Vector offsets); + + /// svuint64_t svldnt1sh_gather_[s64]offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets) : "LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, short* address, Vector offsets); + + /// svint64_t svldnt1sh_gather_[u64]offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets) : "LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, short* address, Vector offsets); + + /// svuint64_t svldnt1sh_gather_[u64]offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets) : "LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, short* address, Vector offsets); + + + /// GatherVectorInt32SignExtendNonTemporal : Load 32-bit data and sign-extend, non-temporal + + /// svint64_t svldnt1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDNT1SW Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses); + + /// svint64_t svldnt1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDNT1SW Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses); + + /// svuint64_t svldnt1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDNT1SW Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses); + + /// svuint64_t svldnt1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDNT1SW Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses); + + /// svint64_t svldnt1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices); + + /// svint64_t svldnt1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector 
GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices); + + /// svuint64_t svldnt1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices); + + /// svuint64_t svldnt1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices); + + /// svint64_t svldnt1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices); + + /// svint64_t svldnt1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices); + + /// svuint64_t svldnt1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices); + + /// svuint64_t svldnt1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices); + + + /// GatherVectorInt32WithByteOffsetsSignExtendNonTemporal : Load 32-bit data and sign-extend, non-temporal + + /// svint64_t svldnt1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets); + + /// svint64_t svldnt1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets); + + /// svuint64_t svldnt1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets); + + /// svuint64_t svldnt1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets); + + /// svint64_t svldnt1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets); + + /// svint64_t svldnt1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets); + + /// svuint64_t svldnt1sw_gather_[u64]offset_u64(svbool_t pg, const 
int32_t *base, svuint64_t offsets) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets); + + /// svuint64_t svldnt1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) : "LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets); + + + /// GatherVectorNonTemporal : Unextended load, non-temporal + + /// svfloat32_t svldnt1_gather[_u32base]_f32(svbool_t pg, svuint32_t bases) : "LDNT1W Zresult.S, Pg/Z, [Zbases.S, XZR]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses); + + /// svint32_t svldnt1_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LDNT1W Zresult.S, Pg/Z, [Zbases.S, XZR]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses); + + /// svuint32_t svldnt1_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LDNT1W Zresult.S, Pg/Z, [Zbases.S, XZR]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses); + + /// svfloat64_t svldnt1_gather[_u64base]_f64(svbool_t pg, svuint64_t bases) : "LDNT1D Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses); + + /// svint64_t svldnt1_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDNT1D Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses); + + /// svuint64_t svldnt1_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDNT1D Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses); + + /// svfloat32_t svldnt1_gather_[u32]offset[_f32](svbool_t pg, const float32_t *base, svuint32_t offsets) : "LDNT1W Zresult.S, Pg/Z, [Zoffsets.S, Xbase]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, float* address, Vector offsets); + + /// svint32_t svldnt1_gather_[u32]offset[_s32](svbool_t pg, const int32_t *base, svuint32_t offsets) : "LDNT1W Zresult.S, Pg/Z, [Zoffsets.S, Xbase]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, int* address, Vector offsets); + + /// svuint32_t svldnt1_gather_[u32]offset[_u32](svbool_t pg, const uint32_t *base, svuint32_t offsets) : "LDNT1W Zresult.S, Pg/Z, [Zoffsets.S, Xbase]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, uint* address, Vector offsets); + + /// svfloat64_t svldnt1_gather_[s64]offset[_f64](svbool_t pg, const float64_t *base, svint64_t offsets) : "LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, double* address, Vector offsets); + + /// svint64_t svldnt1_gather_[s64]offset[_s64](svbool_t pg, const int64_t *base, svint64_t offsets) : "LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, long* address, Vector offsets); + + /// svuint64_t svldnt1_gather_[s64]offset[_u64](svbool_t pg, const uint64_t *base, svint64_t offsets) : "LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, ulong* address, Vector offsets); + + /// svfloat64_t svldnt1_gather_[u64]offset[_f64](svbool_t pg, const float64_t *base, svuint64_t offsets) : "LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector 
GatherVectorNonTemporal(Vector mask, double* address, Vector offsets); + + /// svint64_t svldnt1_gather_[u64]offset[_s64](svbool_t pg, const int64_t *base, svuint64_t offsets) : "LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, long* address, Vector offsets); + + /// svuint64_t svldnt1_gather_[u64]offset[_u64](svbool_t pg, const uint64_t *base, svuint64_t offsets) : "LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, ulong* address, Vector offsets); + + /// svfloat64_t svldnt1_gather_[s64]index[_f64](svbool_t pg, const float64_t *base, svint64_t indices) : "LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, double* address, Vector indices); + + /// svint64_t svldnt1_gather_[s64]index[_s64](svbool_t pg, const int64_t *base, svint64_t indices) : "LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, long* address, Vector indices); + + /// svuint64_t svldnt1_gather_[s64]index[_u64](svbool_t pg, const uint64_t *base, svint64_t indices) : "LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, ulong* address, Vector indices); + + /// svfloat64_t svldnt1_gather_[u64]index[_f64](svbool_t pg, const float64_t *base, svuint64_t indices) : "LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, double* address, Vector indices); + + /// svint64_t svldnt1_gather_[u64]index[_s64](svbool_t pg, const int64_t *base, svuint64_t indices) : "LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, long* address, Vector indices); + + /// svuint64_t svldnt1_gather_[u64]index[_u64](svbool_t pg, const uint64_t *base, svuint64_t indices) : "LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorNonTemporal(Vector mask, ulong* address, Vector indices); + + + /// GatherVectorSByteSignExtendNonTemporal : Load 8-bit data and sign-extend, non-temporal + + /// svint32_t svldnt1sb_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LDNT1SB Zresult.S, Pg/Z, [Zbases.S, XZR]" + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses); + + /// svuint32_t svldnt1sb_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LDNT1SB Zresult.S, Pg/Z, [Zbases.S, XZR]" + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses); + + /// svint64_t svldnt1sb_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDNT1SB Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses); + + /// svuint64_t svldnt1sb_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDNT1SB Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses); + + /// svint32_t svldnt1sb_gather_[u32]offset_s32(svbool_t pg, const int8_t *base, svuint32_t offsets) : "LDNT1SB Zresult.S, Pg/Z, [Zoffsets.S, Xbase]" + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets); + + /// svuint32_t svldnt1sb_gather_[u32]offset_u32(svbool_t pg, const int8_t *base, svuint32_t offsets) : "LDNT1SB Zresult.S, Pg/Z, [Zoffsets.S, Xbase]" + public static unsafe Vector 
GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets); + + /// svint64_t svldnt1sb_gather_[s64]offset_s64(svbool_t pg, const int8_t *base, svint64_t offsets) : "LDNT1SB Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets); + + /// svuint64_t svldnt1sb_gather_[s64]offset_u64(svbool_t pg, const int8_t *base, svint64_t offsets) : "LDNT1SB Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets); + + /// svint64_t svldnt1sb_gather_[u64]offset_s64(svbool_t pg, const int8_t *base, svuint64_t offsets) : "LDNT1SB Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets); + + /// svuint64_t svldnt1sb_gather_[u64]offset_u64(svbool_t pg, const int8_t *base, svuint64_t offsets) : "LDNT1SB Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets); + + + /// GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal : Load 16-bit data and zero-extend, non-temporal + + /// svint32_t svldnt1uh_gather_[u32]offset_s32(svbool_t pg, const uint16_t *base, svuint32_t offsets) : "LDNT1H Zresult.S, Pg/Z, [Zoffsets.S, Xbase]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets); + + /// svuint32_t svldnt1uh_gather_[u32]offset_u32(svbool_t pg, const uint16_t *base, svuint32_t offsets) : "LDNT1H Zresult.S, Pg/Z, [Zoffsets.S, Xbase]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets); + + /// svint64_t svldnt1uh_gather_[s64]offset_s64(svbool_t pg, const uint16_t *base, svint64_t offsets) : "LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets); + + /// svuint64_t svldnt1uh_gather_[s64]offset_u64(svbool_t pg, const uint16_t *base, svint64_t offsets) : "LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets); + + /// svint64_t svldnt1uh_gather_[u64]offset_s64(svbool_t pg, const uint16_t *base, svuint64_t offsets) : "LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets); + + /// svuint64_t svldnt1uh_gather_[u64]offset_u64(svbool_t pg, const uint16_t *base, svuint64_t offsets) : "LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets); + + + /// GatherVectorUInt16ZeroExtendNonTemporal : Load 16-bit data and zero-extend, non-temporal + + /// svint32_t svldnt1uh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LDNT1H Zresult.S, Pg/Z, [Zbases.S, XZR]" + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses); + + /// svuint32_t svldnt1uh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LDNT1H Zresult.S, Pg/Z, [Zbases.S, XZR]" + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses); + + /// 
svint64_t svldnt1uh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDNT1H Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses); + + /// svuint64_t svldnt1uh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDNT1H Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses); + + /// svint64_t svldnt1uh_gather_[s64]index_s64(svbool_t pg, const uint16_t *base, svint64_t indices) : "LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, ushort* address, Vector indices); + + /// svuint64_t svldnt1uh_gather_[s64]index_u64(svbool_t pg, const uint16_t *base, svint64_t indices) : "LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, ushort* address, Vector indices); + + /// svint64_t svldnt1uh_gather_[u64]index_s64(svbool_t pg, const uint16_t *base, svuint64_t indices) : "LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, ushort* address, Vector indices); + + /// svuint64_t svldnt1uh_gather_[u64]index_u64(svbool_t pg, const uint16_t *base, svuint64_t indices) : "LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, ushort* address, Vector indices); + + + /// GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal : Load 32-bit data and zero-extend, non-temporal + + /// svint64_t svldnt1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets); + + /// svint64_t svldnt1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets); + + /// svuint64_t svldnt1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets); + + /// svuint64_t svldnt1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets); + + /// svint64_t svldnt1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets); + + /// svint64_t svldnt1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets); + + /// svuint64_t svldnt1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector 
GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets); + + /// svuint64_t svldnt1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets); + + + /// GatherVectorUInt32ZeroExtendNonTemporal : Load 32-bit data and zero-extend, non-temporal + + /// svint64_t svldnt1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDNT1W Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses); + + /// svint64_t svldnt1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDNT1W Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses); + + /// svuint64_t svldnt1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDNT1W Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses); + + /// svuint64_t svldnt1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDNT1W Zresult.D, Pg/Z, [Zbases.D, XZR]" + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses); + + /// svint64_t svldnt1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices); + + /// svint64_t svldnt1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices); + + /// svuint64_t svldnt1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices); + + /// svuint64_t svldnt1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices); + + /// svint64_t svldnt1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices); + + /// svint64_t svldnt1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices); + + /// svuint64_t svldnt1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices); + + /// svuint64_t svldnt1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) : "LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase]" + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices); + + + /// total 
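method signatures: 109 + /// total method names: 11 +} + + +
+ // Editorial sketch (not generated output): non-temporal gathers (LDNT1*) hint that the
+ // gathered data will not be revisited soon, so it should not displace useful cache
+ // lines; a single pass over scattered records is the typical fit. A hedged use of the
+ // proposed surface, with illustrative variable names:
+ // Vector<double> v = Sve2.GatherVectorNonTemporal(mask, basePtr, indices); // LDNT1D
+ // where 'indices' holds per-lane element indices (the instruction applies the
+ // element-size scaling, as in the ACLE index forms).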
method signatures: 109 + /// total method names: 11 +} + + + /// Rejected: + /// public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1ub_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1ub_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1ub_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1ub_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1sh_gather[_u32base]_index_s32 + /// public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1sh_gather[_u32base]_index_u32 + /// public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1sh_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1sh_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1sh_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1sh_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1sh_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1sh_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1sw_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1sw_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1sw_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1sw_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1sw_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1sw_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1sw_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1sw_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1_gather[_u32base]_offset_f32 + /// public static unsafe Vector 
GatherVectorNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1_gather[_u64base]_offset_f64 + /// public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses, long index); // svldnt1_gather[_u32base]_index_f32 + /// public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses, long index); // svldnt1_gather[_u32base]_index_s32 + /// public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses, long index); // svldnt1_gather[_u32base]_index_u32 + /// public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses, long index); // svldnt1_gather[_u64base]_index_f64 + /// public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses, long index); // svldnt1_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses, long index); // svldnt1_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1sb_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1sb_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1sb_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1sb_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1uh_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1uh_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1uh_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1uh_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1uh_gather[_u32base]_index_s32 + /// public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1uh_gather[_u32base]_index_u32 + /// public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1uh_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1uh_gather[_u64base]_index_u64 + /// public static unsafe Vector 
GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1uw_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1uw_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1uw_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, Vector addresses, long offset); // svldnt1uw_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1uw_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1uw_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1uw_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses, long index); // svldnt1uw_gather[_u64base]_index_u64 + /// Total Rejected: 52 + + /// Total ACLE covered across API: 161 + diff --git a/sve_api/out_api/apiraw_FEAT_SVE2__mask.cs b/sve_api/out_api/apiraw_FEAT_SVE2__mask.cs new file mode 100644 index 0000000000000..af0df8e620b65 --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE2__mask.cs @@ -0,0 +1,333 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: mask +{ + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right); // WHILEGT + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right); // WHILEGT + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right); // WHILEHI + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right); // WHILEHI + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right); // WHILEGE + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right); // WHILEGE + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right); // WHILEHS + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right); // WHILEHS + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CreateWhileReadAfterWriteMask(T* left, T* right); // WHILERW + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CreateWhileWriteAfterReadMask(T* left, T* right); // WHILEWR + + /// T: sbyte, short, byte, ushort + public static unsafe Vector Match(Vector mask, Vector left, Vector right); // MATCH + + /// T: sbyte, short, byte, ushort + public static unsafe Vector NoMatch(Vector mask, Vector left, Vector right); // NMATCH + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector 
SaturatingExtractNarrowingLower(Vector value); // SQXTNB or UQXTNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector SaturatingExtractNarrowingUpper(Vector even, Vector op); // SQXTNT or UQXTNT + + /// T: [byte, short], [ushort, int], [uint, long] + public static unsafe Vector SaturatingExtractUnsignedNarrowingLower(Vector value); // SQXTUNB + + /// T: [byte, short], [ushort, int], [uint, long] + public static unsafe Vector SaturatingExtractUnsignedNarrowingUpper(Vector even, Vector op); // SQXTUNT + + /// total method signatures: 16 + +} + + +/// Full API +public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: mask +{ + /// CreateWhileGreaterThanMask : While decrementing scalar is greater than + + /// svbool_t svwhilegt_b8[_s32](int32_t op1, int32_t op2) : "WHILEGT Presult.B, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right); + + /// svbool_t svwhilegt_b8[_s64](int64_t op1, int64_t op2) : "WHILEGT Presult.B, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right); + + /// svbool_t svwhilegt_b8[_u32](uint32_t op1, uint32_t op2) : "WHILEHI Presult.B, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right); + + /// svbool_t svwhilegt_b8[_u64](uint64_t op1, uint64_t op2) : "WHILEHI Presult.B, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right); + + /// svbool_t svwhilegt_b16[_s32](int32_t op1, int32_t op2) : "WHILEGT Presult.H, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right); + + /// svbool_t svwhilegt_b16[_s64](int64_t op1, int64_t op2) : "WHILEGT Presult.H, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right); + + /// svbool_t svwhilegt_b16[_u32](uint32_t op1, uint32_t op2) : "WHILEHI Presult.H, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right); + + /// svbool_t svwhilegt_b16[_u64](uint64_t op1, uint64_t op2) : "WHILEHI Presult.H, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right); + + /// svbool_t svwhilegt_b32[_s32](int32_t op1, int32_t op2) : "WHILEGT Presult.S, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right); + + /// svbool_t svwhilegt_b32[_s64](int64_t op1, int64_t op2) : "WHILEGT Presult.S, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right); + + /// svbool_t svwhilegt_b32[_u32](uint32_t op1, uint32_t op2) : "WHILEHI Presult.S, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right); + + /// svbool_t svwhilegt_b32[_u64](uint64_t op1, uint64_t op2) : "WHILEHI Presult.S, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right); + + /// svbool_t svwhilegt_b64[_s32](int32_t op1, int32_t op2) : "WHILEGT Presult.D, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right); + + /// svbool_t svwhilegt_b64[_s64](int64_t op1, int64_t op2) : "WHILEGT Presult.D, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right); + + /// svbool_t svwhilegt_b64[_u32](uint32_t op1, uint32_t op2) : "WHILEHI Presult.D, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right); + + /// svbool_t svwhilegt_b64[_u64](uint64_t op1, uint64_t op2) : 
"WHILEHI Presult.D, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right); + + + /// CreateWhileGreaterThanOrEqualMask : While decrementing scalar is greater than or equal to + + /// svbool_t svwhilege_b8[_s32](int32_t op1, int32_t op2) : "WHILEGE Presult.B, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right); + + /// svbool_t svwhilege_b8[_s64](int64_t op1, int64_t op2) : "WHILEGE Presult.B, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right); + + /// svbool_t svwhilege_b8[_u32](uint32_t op1, uint32_t op2) : "WHILEHS Presult.B, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right); + + /// svbool_t svwhilege_b8[_u64](uint64_t op1, uint64_t op2) : "WHILEHS Presult.B, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right); + + /// svbool_t svwhilege_b16[_s32](int32_t op1, int32_t op2) : "WHILEGE Presult.H, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right); + + /// svbool_t svwhilege_b16[_s64](int64_t op1, int64_t op2) : "WHILEGE Presult.H, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right); + + /// svbool_t svwhilege_b16[_u32](uint32_t op1, uint32_t op2) : "WHILEHS Presult.H, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right); + + /// svbool_t svwhilege_b16[_u64](uint64_t op1, uint64_t op2) : "WHILEHS Presult.H, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right); + + /// svbool_t svwhilege_b32[_s32](int32_t op1, int32_t op2) : "WHILEGE Presult.S, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right); + + /// svbool_t svwhilege_b32[_s64](int64_t op1, int64_t op2) : "WHILEGE Presult.S, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right); + + /// svbool_t svwhilege_b32[_u32](uint32_t op1, uint32_t op2) : "WHILEHS Presult.S, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right); + + /// svbool_t svwhilege_b32[_u64](uint64_t op1, uint64_t op2) : "WHILEHS Presult.S, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right); + + /// svbool_t svwhilege_b64[_s32](int32_t op1, int32_t op2) : "WHILEGE Presult.D, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right); + + /// svbool_t svwhilege_b64[_s64](int64_t op1, int64_t op2) : "WHILEGE Presult.D, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right); + + /// svbool_t svwhilege_b64[_u32](uint32_t op1, uint32_t op2) : "WHILEHS Presult.D, Wop1, Wop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right); + + /// svbool_t svwhilege_b64[_u64](uint64_t op1, uint64_t op2) : "WHILEHS Presult.D, Xop1, Xop2" + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right); + + + /// CreateWhileReadAfterWriteMask : While free of read-after-write conflicts + + /// svbool_t svwhilerw[_f32](const float32_t *op1, const float32_t *op2) : "WHILERW Presult.S, Xop1, Xop2" + public static unsafe Vector CreateWhileReadAfterWriteMask(float* left, float* right); + + /// svbool_t svwhilerw[_f64](const float64_t *op1, const float64_t *op2) : 
"WHILERW Presult.D, Xop1, Xop2" + public static unsafe Vector CreateWhileReadAfterWriteMask(double* left, double* right); + + /// svbool_t svwhilerw[_s8](const int8_t *op1, const int8_t *op2) : "WHILERW Presult.B, Xop1, Xop2" + public static unsafe Vector CreateWhileReadAfterWriteMask(sbyte* left, sbyte* right); + + /// svbool_t svwhilerw[_s16](const int16_t *op1, const int16_t *op2) : "WHILERW Presult.H, Xop1, Xop2" + public static unsafe Vector CreateWhileReadAfterWriteMask(short* left, short* right); + + /// svbool_t svwhilerw[_s32](const int32_t *op1, const int32_t *op2) : "WHILERW Presult.S, Xop1, Xop2" + public static unsafe Vector CreateWhileReadAfterWriteMask(int* left, int* right); + + /// svbool_t svwhilerw[_s64](const int64_t *op1, const int64_t *op2) : "WHILERW Presult.D, Xop1, Xop2" + public static unsafe Vector CreateWhileReadAfterWriteMask(long* left, long* right); + + /// svbool_t svwhilerw[_u8](const uint8_t *op1, const uint8_t *op2) : "WHILERW Presult.B, Xop1, Xop2" + public static unsafe Vector CreateWhileReadAfterWriteMask(byte* left, byte* right); + + /// svbool_t svwhilerw[_u16](const uint16_t *op1, const uint16_t *op2) : "WHILERW Presult.H, Xop1, Xop2" + public static unsafe Vector CreateWhileReadAfterWriteMask(ushort* left, ushort* right); + + /// svbool_t svwhilerw[_u32](const uint32_t *op1, const uint32_t *op2) : "WHILERW Presult.S, Xop1, Xop2" + public static unsafe Vector CreateWhileReadAfterWriteMask(uint* left, uint* right); + + /// svbool_t svwhilerw[_u64](const uint64_t *op1, const uint64_t *op2) : "WHILERW Presult.D, Xop1, Xop2" + public static unsafe Vector CreateWhileReadAfterWriteMask(ulong* left, ulong* right); + + + /// CreateWhileWriteAfterReadMask : While free of write-after-read conflicts + + /// svbool_t svwhilewr[_f32](const float32_t *op1, const float32_t *op2) : "WHILEWR Presult.S, Xop1, Xop2" + public static unsafe Vector CreateWhileWriteAfterReadMask(float* left, float* right); + + /// svbool_t svwhilewr[_f64](const float64_t *op1, const float64_t *op2) : "WHILEWR Presult.D, Xop1, Xop2" + public static unsafe Vector CreateWhileWriteAfterReadMask(double* left, double* right); + + /// svbool_t svwhilewr[_s8](const int8_t *op1, const int8_t *op2) : "WHILEWR Presult.B, Xop1, Xop2" + public static unsafe Vector CreateWhileWriteAfterReadMask(sbyte* left, sbyte* right); + + /// svbool_t svwhilewr[_s16](const int16_t *op1, const int16_t *op2) : "WHILEWR Presult.H, Xop1, Xop2" + public static unsafe Vector CreateWhileWriteAfterReadMask(short* left, short* right); + + /// svbool_t svwhilewr[_s32](const int32_t *op1, const int32_t *op2) : "WHILEWR Presult.S, Xop1, Xop2" + public static unsafe Vector CreateWhileWriteAfterReadMask(int* left, int* right); + + /// svbool_t svwhilewr[_s64](const int64_t *op1, const int64_t *op2) : "WHILEWR Presult.D, Xop1, Xop2" + public static unsafe Vector CreateWhileWriteAfterReadMask(long* left, long* right); + + /// svbool_t svwhilewr[_u8](const uint8_t *op1, const uint8_t *op2) : "WHILEWR Presult.B, Xop1, Xop2" + public static unsafe Vector CreateWhileWriteAfterReadMask(byte* left, byte* right); + + /// svbool_t svwhilewr[_u16](const uint16_t *op1, const uint16_t *op2) : "WHILEWR Presult.H, Xop1, Xop2" + public static unsafe Vector CreateWhileWriteAfterReadMask(ushort* left, ushort* right); + + /// svbool_t svwhilewr[_u32](const uint32_t *op1, const uint32_t *op2) : "WHILEWR Presult.S, Xop1, Xop2" + public static unsafe Vector CreateWhileWriteAfterReadMask(uint* left, uint* right); + + /// svbool_t svwhilewr[_u64](const 
+
+
+  /// CreateWhileWriteAfterReadMask : While free of write-after-read conflicts
+
+  /// svbool_t svwhilewr[_f32](const float32_t *op1, const float32_t *op2) : "WHILEWR Presult.S, Xop1, Xop2"
+  public static unsafe Vector<float> CreateWhileWriteAfterReadMask(float* left, float* right);
+
+  /// svbool_t svwhilewr[_f64](const float64_t *op1, const float64_t *op2) : "WHILEWR Presult.D, Xop1, Xop2"
+  public static unsafe Vector<double> CreateWhileWriteAfterReadMask(double* left, double* right);
+
+  /// svbool_t svwhilewr[_s8](const int8_t *op1, const int8_t *op2) : "WHILEWR Presult.B, Xop1, Xop2"
+  public static unsafe Vector<sbyte> CreateWhileWriteAfterReadMask(sbyte* left, sbyte* right);
+
+  /// svbool_t svwhilewr[_s16](const int16_t *op1, const int16_t *op2) : "WHILEWR Presult.H, Xop1, Xop2"
+  public static unsafe Vector<short> CreateWhileWriteAfterReadMask(short* left, short* right);
+
+  /// svbool_t svwhilewr[_s32](const int32_t *op1, const int32_t *op2) : "WHILEWR Presult.S, Xop1, Xop2"
+  public static unsafe Vector<int> CreateWhileWriteAfterReadMask(int* left, int* right);
+
+  /// svbool_t svwhilewr[_s64](const int64_t *op1, const int64_t *op2) : "WHILEWR Presult.D, Xop1, Xop2"
+  public static unsafe Vector<long> CreateWhileWriteAfterReadMask(long* left, long* right);
+
+  /// svbool_t svwhilewr[_u8](const uint8_t *op1, const uint8_t *op2) : "WHILEWR Presult.B, Xop1, Xop2"
+  public static unsafe Vector<byte> CreateWhileWriteAfterReadMask(byte* left, byte* right);
+
+  /// svbool_t svwhilewr[_u16](const uint16_t *op1, const uint16_t *op2) : "WHILEWR Presult.H, Xop1, Xop2"
+  public static unsafe Vector<ushort> CreateWhileWriteAfterReadMask(ushort* left, ushort* right);
+
+  /// svbool_t svwhilewr[_u32](const uint32_t *op1, const uint32_t *op2) : "WHILEWR Presult.S, Xop1, Xop2"
+  public static unsafe Vector<uint> CreateWhileWriteAfterReadMask(uint* left, uint* right);
+
+  /// svbool_t svwhilewr[_u64](const uint64_t *op1, const uint64_t *op2) : "WHILEWR Presult.D, Xop1, Xop2"
+  public static unsafe Vector<ulong> CreateWhileWriteAfterReadMask(ulong* left, ulong* right);
+
+
+  /// Match : Detect any matching elements
+
+  /// svbool_t svmatch[_s8](svbool_t pg, svint8_t op1, svint8_t op2) : "MATCH Presult.B, Pg/Z, Zop1.B, Zop2.B"
+  public static unsafe Vector<sbyte> Match(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right);
+
+  /// svbool_t svmatch[_s16](svbool_t pg, svint16_t op1, svint16_t op2) : "MATCH Presult.H, Pg/Z, Zop1.H, Zop2.H"
+  public static unsafe Vector<short> Match(Vector<short> mask, Vector<short> left, Vector<short> right);
+
+  /// svbool_t svmatch[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) : "MATCH Presult.B, Pg/Z, Zop1.B, Zop2.B"
+  public static unsafe Vector<byte> Match(Vector<byte> mask, Vector<byte> left, Vector<byte> right);
+
+  /// svbool_t svmatch[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) : "MATCH Presult.H, Pg/Z, Zop1.H, Zop2.H"
+  public static unsafe Vector<ushort> Match(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right);
+
+
+  /// NoMatch : Detect no matching elements
+
+  /// svbool_t svnmatch[_s8](svbool_t pg, svint8_t op1, svint8_t op2) : "NMATCH Presult.B, Pg/Z, Zop1.B, Zop2.B"
+  public static unsafe Vector<sbyte> NoMatch(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right);
+
+  /// svbool_t svnmatch[_s16](svbool_t pg, svint16_t op1, svint16_t op2) : "NMATCH Presult.H, Pg/Z, Zop1.H, Zop2.H"
+  public static unsafe Vector<short> NoMatch(Vector<short> mask, Vector<short> left, Vector<short> right);
+
+  /// svbool_t svnmatch[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) : "NMATCH Presult.B, Pg/Z, Zop1.B, Zop2.B"
+  public static unsafe Vector<byte> NoMatch(Vector<byte> mask, Vector<byte> left, Vector<byte> right);
+
+  /// svbool_t svnmatch[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) : "NMATCH Presult.H, Pg/Z, Zop1.H, Zop2.H"
+  public static unsafe Vector<ushort> NoMatch(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right);
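+
+  // Editor's note - illustrative sketch, not generated output. MATCH reports, for each active
+  // lane of `left`, whether that value equals any element in the corresponding 128-bit segment
+  // of `right`, which suits multi-delimiter byte scanning. Helper names outside this file
+  // (CreateTrueMaskByte, LoadVector) are assumed from the companion Sve listings.
+  //
+  //   Vector<byte> pg     = Sve.CreateTrueMaskByte();
+  //   Vector<byte> text   = Sve.LoadVector(pg, p);          // p: byte* into the input
+  //   Vector<byte> hits   = Sve2.Match(pg, text, delims);   // delims: delimiter set per segment
+  //   Vector<byte> misses = Sve2.NoMatch(pg, text, delims); // complement under pg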
+
+
+  /// SaturatingExtractNarrowingLower : Saturating extract narrow (bottom)
+
+  /// svint8_t svqxtnb[_s16](svint16_t op) : "SQXTNB Zresult.B, Zop.H"
+  public static unsafe Vector<sbyte> SaturatingExtractNarrowingLower(Vector<short> value);
+
+  /// svint16_t svqxtnb[_s32](svint32_t op) : "SQXTNB Zresult.H, Zop.S"
+  public static unsafe Vector<short> SaturatingExtractNarrowingLower(Vector<int> value);
+
+  /// svint32_t svqxtnb[_s64](svint64_t op) : "SQXTNB Zresult.S, Zop.D"
+  public static unsafe Vector<int> SaturatingExtractNarrowingLower(Vector<long> value);
+
+  /// svuint8_t svqxtnb[_u16](svuint16_t op) : "UQXTNB Zresult.B, Zop.H"
+  public static unsafe Vector<byte> SaturatingExtractNarrowingLower(Vector<ushort> value);
+
+  /// svuint16_t svqxtnb[_u32](svuint32_t op) : "UQXTNB Zresult.H, Zop.S"
+  public static unsafe Vector<ushort> SaturatingExtractNarrowingLower(Vector<uint> value);
+
+  /// svuint32_t svqxtnb[_u64](svuint64_t op) : "UQXTNB Zresult.S, Zop.D"
+  public static unsafe Vector<uint> SaturatingExtractNarrowingLower(Vector<ulong> value);
+
+
+  /// SaturatingExtractNarrowingUpper : Saturating extract narrow (top)
+
+  /// svint8_t svqxtnt[_s16](svint8_t even, svint16_t op) : "SQXTNT Ztied.B, Zop.H"
+  public static unsafe Vector<sbyte> SaturatingExtractNarrowingUpper(Vector<sbyte> even, Vector<short> op);
+
+  /// svint16_t svqxtnt[_s32](svint16_t even, svint32_t op) : "SQXTNT Ztied.H, Zop.S"
+  public static unsafe Vector<short> SaturatingExtractNarrowingUpper(Vector<short> even, Vector<int> op);
+
+  /// svint32_t svqxtnt[_s64](svint32_t even, svint64_t op) : "SQXTNT Ztied.S, Zop.D"
+  public static unsafe Vector<int> SaturatingExtractNarrowingUpper(Vector<int> even, Vector<long> op);
+
+  /// svuint8_t svqxtnt[_u16](svuint8_t even, svuint16_t op) : "UQXTNT Ztied.B, Zop.H"
+  public static unsafe Vector<byte> SaturatingExtractNarrowingUpper(Vector<byte> even, Vector<ushort> op);
+
+  /// svuint16_t svqxtnt[_u32](svuint16_t even, svuint32_t op) : "UQXTNT Ztied.H, Zop.S"
+  public static unsafe Vector<ushort> SaturatingExtractNarrowingUpper(Vector<ushort> even, Vector<uint> op);
+
+  /// svuint32_t svqxtnt[_u64](svuint32_t even, svuint64_t op) : "UQXTNT Ztied.S, Zop.D"
+  public static unsafe Vector<uint> SaturatingExtractNarrowingUpper(Vector<uint> even, Vector<ulong> op);
+
+
+  /// SaturatingExtractUnsignedNarrowingLower : Saturating extract unsigned narrow (bottom)
+
+  /// svuint8_t svqxtunb[_s16](svint16_t op) : "SQXTUNB Zresult.B, Zop.H"
+  public static unsafe Vector<byte> SaturatingExtractUnsignedNarrowingLower(Vector<short> value);
+
+  /// svuint16_t svqxtunb[_s32](svint32_t op) : "SQXTUNB Zresult.H, Zop.S"
+  public static unsafe Vector<ushort> SaturatingExtractUnsignedNarrowingLower(Vector<int> value);
+
+  /// svuint32_t svqxtunb[_s64](svint64_t op) : "SQXTUNB Zresult.S, Zop.D"
+  public static unsafe Vector<uint> SaturatingExtractUnsignedNarrowingLower(Vector<long> value);
+
+
+  /// SaturatingExtractUnsignedNarrowingUpper : Saturating extract unsigned narrow (top)
+
+  /// svuint8_t svqxtunt[_s16](svuint8_t even, svint16_t op) : "SQXTUNT Ztied.B, Zop.H"
+  public static unsafe Vector<byte> SaturatingExtractUnsignedNarrowingUpper(Vector<byte> even, Vector<short> op);
+
+  /// svuint16_t svqxtunt[_s32](svuint16_t even, svint32_t op) : "SQXTUNT Ztied.H, Zop.S"
+  public static unsafe Vector<ushort> SaturatingExtractUnsignedNarrowingUpper(Vector<ushort> even, Vector<int> op);
+
+  /// svuint32_t svqxtunt[_s64](svuint32_t even, svint64_t op) : "SQXTUNT Ztied.S, Zop.D"
+  public static unsafe Vector<uint> SaturatingExtractUnsignedNarrowingUpper(Vector<uint> even, Vector<long> op);
+
+
+  /// total method signatures: 78
+  /// total method names: 10
+}
+
+
+  /// Total ACLE covered across API: 78
+
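+
+  // Editor's note - illustrative sketch, not generated output. The bottom/top pair above
+  // narrows two wide vectors into one, SQXTNB filling the even lanes and SQXTNT the odd lanes
+  // of the same destination, with saturation instead of truncation. Variable names are
+  // assumptions.
+  //
+  //   Vector<short> packed = Sve2.SaturatingExtractNarrowingUpper(
+  //       Sve2.SaturatingExtractNarrowingLower(wide0),   // even lanes from wide0 (Vector<int>)
+  //       wide1);                                        // odd lanes from wide1 (Vector<int>)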
diff --git a/sve_api/out_api/apiraw_FEAT_SVE2__maths.cs b/sve_api/out_api/apiraw_FEAT_SVE2__maths.cs
new file mode 100644
index 0000000000000..05b8c2c241ec6
--- /dev/null
+++ b/sve_api/out_api/apiraw_FEAT_SVE2__maths.cs
@@ -0,0 +1,2220 @@
+namespace System.Runtime.Intrinsics.Arm;
+
+/// VectorT Summary
+public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: maths
+{
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> AbsoluteDifferenceAdd(Vector<T> addend, Vector<T> left, Vector<T> right); // SABA or UABA // MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> AbsoluteDifferenceAddWideningLower(Vector<T> addend, Vector<T2> left, Vector<T2> right); // SABALB or UABALB // MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> AbsoluteDifferenceAddWideningUpper(Vector<T> addend, Vector<T2> left, Vector<T2> right); // SABALT or UABALT // MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> AbsoluteDifferenceWideningLower(Vector<T2> left, Vector<T2> right); // SABDLB or UABDLB
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> AbsoluteDifferenceWideningUpper(Vector<T2> left, Vector<T2> right); // SABDLT or UABDLT
+
+  /// T: uint, ulong
+  public static unsafe Vector<T> AddCarryWideningLower(Vector<T> op1, Vector<T> op2, Vector<T> op3); // ADCLB // MOVPRFX
+
+  /// T: uint, ulong
+  public static unsafe Vector<T> AddCarryWideningUpper(Vector<T> op1, Vector<T> op2, Vector<T> op3); // ADCLT // MOVPRFX
+
+  /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong]
+  public static unsafe Vector<T> AddHighNarrowingLower(Vector<T2> left, Vector<T2> right); // ADDHNB
+
+  /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong]
+  public static unsafe Vector<T> AddHighNarrowingUpper(Vector<T> even, Vector<T2> left, Vector<T2> right); // ADDHNT
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> AddPairwise(Vector<T> left, Vector<T> right); // FADDP or ADDP // predicated, MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> AddPairwiseWidening(Vector<T> left, Vector<T2> right); // SADALP or UADALP // predicated, MOVPRFX
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> AddSaturate(Vector<T> left, Vector<T> right); // SQADD or UQADD // predicated, MOVPRFX
+
+  /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long]
+  public static unsafe Vector<T> AddSaturateWithSignedAddend(Vector<T> left, Vector<T2> right); // USQADD // predicated, MOVPRFX
+
+  /// T: [sbyte, byte], [short, ushort], [int, uint], [long, ulong]
+  public static unsafe Vector<T> AddSaturateWithUnsignedAddend(Vector<T> left, Vector<T2> right); // SUQADD // predicated, MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> AddWideLower(Vector<T> left, Vector<T2> right); // SADDWB or UADDWB
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> AddWideUpper(Vector<T> left, Vector<T2> right); // SADDWT or UADDWT
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> AddWideningLower(Vector<T2> left, Vector<T2> right); // SADDLB or UADDLB
+
+  /// T: [short, sbyte], [int, short], [long, int]
+  public static unsafe Vector<T> AddWideningLowerUpper(Vector<T2> left, Vector<T2> right); // SADDLBT
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> AddWideningUpper(Vector<T2> left, Vector<T2> right); // SADDLT or UADDLT
+
+  /// T: [int, sbyte], [long, short]
+  public static unsafe Vector<T> DotProductComplex(Vector<T> op1, Vector<T2> op2, Vector<T2> op3, [ConstantExpected] byte rotation); // CDOT // MOVPRFX
+
+  /// T: [int, sbyte], [long, short]
+  public static unsafe Vector<T> DotProductComplex(Vector<T> op1, Vector<T2> op2, Vector<T2> op3, ulong imm_index, [ConstantExpected] byte rotation); // CDOT // MOVPRFX
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> HalvingAdd(Vector<T> left, Vector<T> right); // SHADD or UHADD // predicated, MOVPRFX
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> HalvingSubtract(Vector<T> left, Vector<T> right); // SHSUB or UHSUB or SHSUBR or UHSUBR // predicated, MOVPRFX
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> HalvingSubtractReversed(Vector<T> left, Vector<T> right); // SHSUBR or UHSUBR or SHSUB or UHSUB // predicated, MOVPRFX
+
+  /// T: float, double
+  public static unsafe Vector<T> MaxNumberPairwise(Vector<T> left, Vector<T> right); // FMAXNMP // predicated, MOVPRFX
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> MaxPairwise(Vector<T> left, Vector<T> right); // FMAXP or SMAXP or UMAXP // predicated, MOVPRFX
+
+  /// T: float, double
+  public static unsafe Vector<T> MinNumberPairwise(Vector<T> left, Vector<T> right); // FMINNMP // predicated, MOVPRFX
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> MinPairwise(Vector<T> left, Vector<T> right); // FMINP or SMINP or UMINP // predicated, MOVPRFX
+
+  /// T: short, int, long, ushort, uint, ulong
+  public static unsafe Vector<T> MultiplyAddBySelectedScalar(Vector<T> addend, Vector<T> left, Vector<T> right, [ConstantExpected] byte rightIndex); // MLA // MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> MultiplyAddWideningLower(Vector<T> op1, Vector<T2> op2, Vector<T2> op3); // SMLALB or UMLALB // MOVPRFX
+
+  /// T: [int, short], [long, int], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> MultiplyAddWideningLower(Vector<T> op1, Vector<T2> op2, Vector<T2> op3, ulong imm_index); // SMLALB or UMLALB // MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> MultiplyAddWideningUpper(Vector<T> op1, Vector<T2> op2, Vector<T2> op3); // SMLALT or UMLALT // MOVPRFX
+
+  /// T: [int, short], [long, int], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> MultiplyAddWideningUpper(Vector<T> op1, Vector<T2> op2, Vector<T2> op3, ulong imm_index); // SMLALT or UMLALT // MOVPRFX
+
+  /// T: short, int, long, ushort, uint, ulong
+  public static unsafe Vector<T> MultiplyBySelectedScalar(Vector<T> left, Vector<T> right, [ConstantExpected] byte rightIndex); // MUL
+
+  /// T: short, int, long, ushort, uint, ulong
+  public static unsafe Vector<T> MultiplySubtractBySelectedScalar(Vector<T> minuend, Vector<T> left, Vector<T> right, [ConstantExpected] byte rightIndex); // MLS // MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> MultiplySubtractWideningLower(Vector<T> op1, Vector<T2> op2, Vector<T2> op3); // SMLSLB or UMLSLB // MOVPRFX
+
+  /// T: [int, short], [long, int], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> MultiplySubtractWideningLower(Vector<T> op1, Vector<T2> op2, Vector<T2> op3, ulong imm_index); // SMLSLB or UMLSLB // MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> MultiplySubtractWideningUpper(Vector<T> op1, Vector<T2> op2, Vector<T2> op3); // SMLSLT or UMLSLT // MOVPRFX
+
+  /// T: [int, short], [long, int], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> MultiplySubtractWideningUpper(Vector<T> op1, Vector<T2> op2, Vector<T2> op3, ulong imm_index); // SMLSLT or UMLSLT // MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> MultiplyWideningLower(Vector<T2> left, Vector<T2> right); // SMULLB or UMULLB
+
+  /// T: [int, short], [long, int], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> MultiplyWideningLower(Vector<T2> op1, Vector<T2> op2, ulong imm_index); // SMULLB or UMULLB
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> MultiplyWideningUpper(Vector<T2> left, Vector<T2> right); // SMULLT or UMULLT
+
+  /// T: [int, short], [long, int], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> MultiplyWideningUpper(Vector<T2> op1, Vector<T2> op2, ulong imm_index); // SMULLT or UMULLT
+
+  public static unsafe Vector<byte> PolynomialMultiply(Vector<byte> left, Vector<byte> right); // PMUL
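+
+  // Editor's note - illustrative sketch, not generated output. The polynomial forms are
+  // carry-less (GF(2)[x]) multiplies, the primitive behind CRC folding and GHASH-style
+  // hashing; the widening bottom/top pair below covers the even and odd source lanes.
+  //
+  //   Vector<ushort> evenProducts = Sve2.PolynomialMultiplyWideningLower(a, b); // PMULLB, lanes 0,2,4,...
+  //   Vector<ushort> oddProducts  = Sve2.PolynomialMultiplyWideningUpper(a, b); // PMULLT, lanes 1,3,5,...
+  //   // a, b : Vector<byte> (assumed inputs)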
+
+  /// T: [ushort, byte], [ulong, uint]
+  public static unsafe Vector<T> PolynomialMultiplyWideningLower(Vector<T2> left, Vector<T2> right); // PMULLB
+
+  /// T: byte, uint
+  public static unsafe Vector<T> PolynomialMultiplyWideningLower(Vector<T> left, Vector<T> right); // PMULLB
+
+  /// T: [ushort, byte], [ulong, uint]
+  public static unsafe Vector<T> PolynomialMultiplyWideningUpper(Vector<T2> left, Vector<T2> right); // PMULLT
+
+  /// T: byte, uint
+  public static unsafe Vector<T> PolynomialMultiplyWideningUpper(Vector<T> left, Vector<T> right); // PMULLT
+
+  /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong]
+  public static unsafe Vector<T> RoundingAddHighNarrowingLower(Vector<T2> left, Vector<T2> right); // RADDHNB
+
+  /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong]
+  public static unsafe Vector<T> RoundingAddHighNarrowingUpper(Vector<T> even, Vector<T2> left, Vector<T2> right); // RADDHNT
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> RoundingHalvingAdd(Vector<T> left, Vector<T> right); // SRHADD or URHADD // predicated, MOVPRFX
+
+  /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong]
+  public static unsafe Vector<T> RoundingSubtractHighNarrowingLower(Vector<T2> left, Vector<T2> right); // RSUBHNB
+
+  /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong]
+  public static unsafe Vector<T> RoundingSubtractHighNarrowingUpper(Vector<T> even, Vector<T2> left, Vector<T2> right); // RSUBHNT
+
+  /// T: sbyte, short, int, long
+  public static unsafe Vector<T> SaturatingAbs(Vector<T> value); // SQABS // predicated, MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int]
+  public static unsafe Vector<T> SaturatingDoublingMultiplyAddWideningLower(Vector<T> op1, Vector<T2> op2, Vector<T2> op3); // SQDMLALB // MOVPRFX
+
+  /// T: [int, short], [long, int]
+  public static unsafe Vector<T> SaturatingDoublingMultiplyAddWideningLower(Vector<T> op1, Vector<T2> op2, Vector<T2> op3, ulong imm_index); // SQDMLALB // MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int]
+  public static unsafe Vector<T> SaturatingDoublingMultiplyAddWideningLowerUpper(Vector<T> op1, Vector<T2> op2, Vector<T2> op3); // SQDMLALBT // MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int]
+  public static unsafe Vector<T> SaturatingDoublingMultiplyAddWideningUpper(Vector<T> op1, Vector<T2> op2, Vector<T2> op3); // SQDMLALT // MOVPRFX
+
+  /// T: [int, short], [long, int]
+  public static unsafe Vector<T> SaturatingDoublingMultiplyAddWideningUpper(Vector<T> op1, Vector<T2> op2, Vector<T2> op3, ulong imm_index); // SQDMLALT // MOVPRFX
+
+  /// T: sbyte, short, int, long
+  public static unsafe Vector<T> SaturatingDoublingMultiplyHigh(Vector<T> left, Vector<T> right); // SQDMULH
+
+  /// T: short, int, long
+  public static unsafe Vector<T> SaturatingDoublingMultiplyHigh(Vector<T> op1, Vector<T> op2, ulong imm_index); // SQDMULH
+
+  /// T: [short, sbyte], [int, short], [long, int]
+  public static unsafe Vector<T> SaturatingDoublingMultiplySubtractWideningLower(Vector<T> op1, Vector<T2> op2, Vector<T2> op3); // SQDMLSLB // MOVPRFX
+
+  /// T: [int, short], [long, int]
+  public static unsafe Vector<T> SaturatingDoublingMultiplySubtractWideningLower(Vector<T> op1, Vector<T2> op2, Vector<T2> op3, ulong imm_index); // SQDMLSLB // MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int]
+  public static unsafe Vector<T> SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector<T> op1, Vector<T2> op2, Vector<T2> op3); // SQDMLSLBT // MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int]
+  public static unsafe Vector<T> SaturatingDoublingMultiplySubtractWideningUpper(Vector<T> op1, Vector<T2> op2, Vector<T2> op3); // SQDMLSLT // MOVPRFX
+
+  /// T: [int, short], [long, int]
+  public static unsafe Vector<T> SaturatingDoublingMultiplySubtractWideningUpper(Vector<T> op1, Vector<T2> op2, Vector<T2> op3, ulong imm_index); // SQDMLSLT // MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int]
+  public static unsafe Vector<T> SaturatingDoublingMultiplyWideningLower(Vector<T2> left, Vector<T2> right); // SQDMULLB
+
+  /// T: [int, short], [long, int]
+  public static unsafe Vector<T> SaturatingDoublingMultiplyWideningLower(Vector<T2> op1, Vector<T2> op2, ulong imm_index); // SQDMULLB
+
+  /// T: [short, sbyte], [int, short], [long, int]
+  public static unsafe Vector<T> SaturatingDoublingMultiplyWideningUpper(Vector<T2> left, Vector<T2> right); // SQDMULLT
+
+  /// T: [int, short], [long, int]
+  public static unsafe Vector<T> SaturatingDoublingMultiplyWideningUpper(Vector<T2> op1, Vector<T2> op2, ulong imm_index); // SQDMULLT
+
+  /// T: sbyte, short, int, long
+  public static unsafe Vector<T> SaturatingNegate(Vector<T> value); // SQNEG // predicated, MOVPRFX
+
+  /// T: sbyte, short, int, long
+  public static unsafe Vector<T> SaturatingRoundingDoublingMultiplyAddHigh(Vector<T> op1, Vector<T> op2, Vector<T> op3); // SQRDMLAH // MOVPRFX
+
+  /// T: short, int, long
+  public static unsafe Vector<T> SaturatingRoundingDoublingMultiplyAddHigh(Vector<T> op1, Vector<T> op2, Vector<T> op3, ulong imm_index); // SQRDMLAH // MOVPRFX
+
+  /// T: sbyte, short, int, long
+  public static unsafe Vector<T> SaturatingRoundingDoublingMultiplyHigh(Vector<T> left, Vector<T> right); // SQRDMULH
+
+  /// T: short, int, long
+  public static unsafe Vector<T> SaturatingRoundingDoublingMultiplyHigh(Vector<T> op1, Vector<T> op2, ulong imm_index); // SQRDMULH
+
+  /// T: sbyte, short, int, long
+  public static unsafe Vector<T> SaturatingRoundingDoublingMultiplySubtractHigh(Vector<T> op1, Vector<T> op2, Vector<T> op3); // SQRDMLSH // MOVPRFX
+
+  /// T: short, int, long
+  public static unsafe Vector<T> SaturatingRoundingDoublingMultiplySubtractHigh(Vector<T> op1, Vector<T> op2, Vector<T> op3, ulong imm_index); // SQRDMLSH // MOVPRFX
+
+  /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong]
+  public static unsafe Vector<T> SubtractHighNarrowingLower(Vector<T2> left, Vector<T2> right); // SUBHNB
+
+  /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong]
+  public static unsafe Vector<T> SubtractHighNarrowingUpper(Vector<T> even, Vector<T2> left, Vector<T2> right); // SUBHNT
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> SubtractSaturate(Vector<T> left, Vector<T> right); // SQSUB or UQSUB or SQSUBR or UQSUBR // predicated, MOVPRFX
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> SubtractSaturateReversed(Vector<T> left, Vector<T> right); // SQSUBR or UQSUBR or SQSUB or UQSUB // predicated, MOVPRFX
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> SubtractWideLower(Vector<T> left, Vector<T2> right); // SSUBWB or USUBWB
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> SubtractWideUpper(Vector<T> left, Vector<T2> right); // SSUBWT or USUBWT
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint]
+  public static unsafe Vector<T> SubtractWideningLower(Vector<T2> left, Vector<T2> right); // SSUBLB or USUBLB
+
+  /// T: [short, sbyte], [int, short], [long, int]
+  public static unsafe Vector<T> SubtractWideningLowerUpper(Vector<T2> left, Vector<T2> right); // SSUBLBT
+
+  /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort],
[ulong, uint] + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right); // SSUBLT or USUBLT + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector SubtractWideningUpperLower(Vector left, Vector right); // SSUBLTB + + /// T: uint, ulong + public static unsafe Vector SubtractWithBorrowWideningLower(Vector op1, Vector op2, Vector op3); // SBCLB // MOVPRFX + + /// T: uint, ulong + public static unsafe Vector SubtractWithBorrowWideningUpper(Vector op1, Vector op2, Vector op3); // SBCLT // MOVPRFX + + /// total method signatures: 89 + +} + + +/// Full API +public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: maths +{ + /// AbsoluteDifferenceAdd : Absolute difference and accumulate + + /// svint8_t svaba[_s8](svint8_t op1, svint8_t op2, svint8_t op3) : "SABA Ztied1.B, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SABA Zresult.B, Zop2.B, Zop3.B" + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right); + + /// svint16_t svaba[_s16](svint16_t op1, svint16_t op2, svint16_t op3) : "SABA Ztied1.H, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SABA Zresult.H, Zop2.H, Zop3.H" + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right); + + /// svint32_t svaba[_s32](svint32_t op1, svint32_t op2, svint32_t op3) : "SABA Ztied1.S, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SABA Zresult.S, Zop2.S, Zop3.S" + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right); + + /// svint64_t svaba[_s64](svint64_t op1, svint64_t op2, svint64_t op3) : "SABA Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; SABA Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right); + + /// svuint8_t svaba[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) : "UABA Ztied1.B, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; UABA Zresult.B, Zop2.B, Zop3.B" + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right); + + /// svuint16_t svaba[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) : "UABA Ztied1.H, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; UABA Zresult.H, Zop2.H, Zop3.H" + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right); + + /// svuint32_t svaba[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) : "UABA Ztied1.S, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; UABA Zresult.S, Zop2.S, Zop3.S" + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right); + + /// svuint64_t svaba[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) : "UABA Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; UABA Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right); + + + /// AbsoluteDifferenceAddWideningLower : Absolute difference and accumulate long (bottom) + + /// svint16_t svabalb[_s16](svint16_t op1, svint8_t op2, svint8_t op3) : "SABALB Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SABALB Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right); + + /// svint32_t svabalb[_s32](svint32_t op1, svint16_t op2, svint16_t op3) : "SABALB Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SABALB Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right); + + /// svint64_t 
svabalb[_s64](svint64_t op1, svint32_t op2, svint32_t op3) : "SABALB Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SABALB Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right); + + /// svuint16_t svabalb[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) : "UABALB Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; UABALB Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right); + + /// svuint32_t svabalb[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) : "UABALB Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; UABALB Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right); + + /// svuint64_t svabalb[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) : "UABALB Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; UABALB Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right); + + + /// AbsoluteDifferenceAddWideningUpper : Absolute difference and accumulate long (top) + + /// svint16_t svabalt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) : "SABALT Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SABALT Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right); + + /// svint32_t svabalt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) : "SABALT Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SABALT Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right); + + /// svint64_t svabalt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) : "SABALT Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SABALT Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right); + + /// svuint16_t svabalt[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) : "UABALT Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; UABALT Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right); + + /// svuint32_t svabalt[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) : "UABALT Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; UABALT Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right); + + /// svuint64_t svabalt[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) : "UABALT Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; UABALT Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right); + + + /// AbsoluteDifferenceWideningLower : Absolute difference long (bottom) + + /// svint16_t svabdlb[_s16](svint8_t op1, svint8_t op2) : "SABDLB Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector right); + + /// svint32_t svabdlb[_s32](svint16_t op1, svint16_t op2) : "SABDLB Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector right); + + /// svint64_t svabdlb[_s64](svint32_t op1, svint32_t op2) : "SABDLB Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector 
right);
+
+  /// svuint16_t svabdlb[_u16](svuint8_t op1, svuint8_t op2) : "UABDLB Zresult.H, Zop1.B, Zop2.B"
+  public static unsafe Vector<ushort> AbsoluteDifferenceWideningLower(Vector<byte> left, Vector<byte> right);
+
+  /// svuint32_t svabdlb[_u32](svuint16_t op1, svuint16_t op2) : "UABDLB Zresult.S, Zop1.H, Zop2.H"
+  public static unsafe Vector<uint> AbsoluteDifferenceWideningLower(Vector<ushort> left, Vector<ushort> right);
+
+  /// svuint64_t svabdlb[_u64](svuint32_t op1, svuint32_t op2) : "UABDLB Zresult.D, Zop1.S, Zop2.S"
+  public static unsafe Vector<ulong> AbsoluteDifferenceWideningLower(Vector<uint> left, Vector<uint> right);
+
+
+  /// AbsoluteDifferenceWideningUpper : Absolute difference long (top)
+
+  /// svint16_t svabdlt[_s16](svint8_t op1, svint8_t op2) : "SABDLT Zresult.H, Zop1.B, Zop2.B"
+  public static unsafe Vector<short> AbsoluteDifferenceWideningUpper(Vector<sbyte> left, Vector<sbyte> right);
+
+  /// svint32_t svabdlt[_s32](svint16_t op1, svint16_t op2) : "SABDLT Zresult.S, Zop1.H, Zop2.H"
+  public static unsafe Vector<int> AbsoluteDifferenceWideningUpper(Vector<short> left, Vector<short> right);
+
+  /// svint64_t svabdlt[_s64](svint32_t op1, svint32_t op2) : "SABDLT Zresult.D, Zop1.S, Zop2.S"
+  public static unsafe Vector<long> AbsoluteDifferenceWideningUpper(Vector<int> left, Vector<int> right);
+
+  /// svuint16_t svabdlt[_u16](svuint8_t op1, svuint8_t op2) : "UABDLT Zresult.H, Zop1.B, Zop2.B"
+  public static unsafe Vector<ushort> AbsoluteDifferenceWideningUpper(Vector<byte> left, Vector<byte> right);
+
+  /// svuint32_t svabdlt[_u32](svuint16_t op1, svuint16_t op2) : "UABDLT Zresult.S, Zop1.H, Zop2.H"
+  public static unsafe Vector<uint> AbsoluteDifferenceWideningUpper(Vector<ushort> left, Vector<ushort> right);
+
+  /// svuint64_t svabdlt[_u64](svuint32_t op1, svuint32_t op2) : "UABDLT Zresult.D, Zop1.S, Zop2.S"
+  public static unsafe Vector<ulong> AbsoluteDifferenceWideningUpper(Vector<uint> left, Vector<uint> right);
+
+
+  /// AddCarryWideningLower : Add with carry long (bottom)
+
+  /// svuint32_t svadclb[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) : "ADCLB Ztied1.S, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; ADCLB Zresult.S, Zop2.S, Zop3.S"
+  public static unsafe Vector<uint> AddCarryWideningLower(Vector<uint> op1, Vector<uint> op2, Vector<uint> op3);
+
+  /// svuint64_t svadclb[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) : "ADCLB Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; ADCLB Zresult.D, Zop2.D, Zop3.D"
+  public static unsafe Vector<ulong> AddCarryWideningLower(Vector<ulong> op1, Vector<ulong> op2, Vector<ulong> op3);
+
+
+  /// AddCarryWideningUpper : Add with carry long (top)
+
+  /// svuint32_t svadclt[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) : "ADCLT Ztied1.S, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; ADCLT Zresult.S, Zop2.S, Zop3.S"
+  public static unsafe Vector<uint> AddCarryWideningUpper(Vector<uint> op1, Vector<uint> op2, Vector<uint> op3);
+
+  /// svuint64_t svadclt[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) : "ADCLT Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; ADCLT Zresult.D, Zop2.D, Zop3.D"
+  public static unsafe Vector<ulong> AddCarryWideningUpper(Vector<ulong> op1, Vector<ulong> op2, Vector<ulong> op3);
+
+
+  /// AddHighNarrowingLower : Add narrow high part (bottom)
+
+  /// svint8_t svaddhnb[_s16](svint16_t op1, svint16_t op2) : "ADDHNB Zresult.B, Zop1.H, Zop2.H"
+  public static unsafe Vector<sbyte> AddHighNarrowingLower(Vector<short> left, Vector<short> right);
+
+  /// svint16_t svaddhnb[_s32](svint32_t op1, svint32_t op2) : "ADDHNB Zresult.H, Zop1.S, Zop2.S"
+  public static unsafe Vector<short> AddHighNarrowingLower(Vector<int> left, Vector<int> right);
+
+  /// svint32_t svaddhnb[_s64](svint64_t op1, svint64_t op2) : "ADDHNB Zresult.S, Zop1.D, Zop2.D"
+  public static unsafe Vector<int> AddHighNarrowingLower(Vector<long> left, Vector<long> right);
+
+  /// svuint8_t svaddhnb[_u16](svuint16_t op1, svuint16_t op2) : "ADDHNB Zresult.B, Zop1.H, Zop2.H"
+  public static unsafe Vector<byte> AddHighNarrowingLower(Vector<ushort> left, Vector<ushort> right);
+
+  /// svuint16_t svaddhnb[_u32](svuint32_t op1, svuint32_t op2) : "ADDHNB Zresult.H, Zop1.S, Zop2.S"
+  public static unsafe Vector<ushort> AddHighNarrowingLower(Vector<uint> left, Vector<uint> right);
+
+  /// svuint32_t svaddhnb[_u64](svuint64_t op1, svuint64_t op2) : "ADDHNB Zresult.S, Zop1.D, Zop2.D"
+  public static unsafe Vector<uint> AddHighNarrowingLower(Vector<ulong> left, Vector<ulong> right);
+
+
+  /// AddHighNarrowingUpper : Add narrow high part (top)
+
+  /// svint8_t svaddhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2) : "ADDHNT Ztied.B, Zop1.H, Zop2.H"
+  public static unsafe Vector<sbyte> AddHighNarrowingUpper(Vector<sbyte> even, Vector<short> left, Vector<short> right);
+
+  /// svint16_t svaddhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2) : "ADDHNT Ztied.H, Zop1.S, Zop2.S"
+  public static unsafe Vector<short> AddHighNarrowingUpper(Vector<short> even, Vector<int> left, Vector<int> right);
+
+  /// svint32_t svaddhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2) : "ADDHNT Ztied.S, Zop1.D, Zop2.D"
+  public static unsafe Vector<int> AddHighNarrowingUpper(Vector<int> even, Vector<long> left, Vector<long> right);
+
+  /// svuint8_t svaddhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2) : "ADDHNT Ztied.B, Zop1.H, Zop2.H"
+  public static unsafe Vector<byte> AddHighNarrowingUpper(Vector<byte> even, Vector<ushort> left, Vector<ushort> right);
+
+  /// svuint16_t svaddhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2) : "ADDHNT Ztied.H, Zop1.S, Zop2.S"
+  public static unsafe Vector<ushort> AddHighNarrowingUpper(Vector<ushort> even, Vector<uint> left, Vector<uint> right);
+
+  /// svuint32_t svaddhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2) : "ADDHNT Ztied.S, Zop1.D, Zop2.D"
+  public static unsafe Vector<uint> AddHighNarrowingUpper(Vector<uint> even, Vector<ulong> left, Vector<ulong> right);
+
+
+  /// AddPairwise : Add pairwise
+
+  /// svfloat32_t svaddp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FADDP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FADDP Zresult.S, Pg/M, Zresult.S, Zop2.S"
+  /// svfloat32_t svaddp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FADDP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FADDP Zresult.S, Pg/M, Zresult.S, Zop2.S"
+  public static unsafe Vector<float> AddPairwise(Vector<float> left, Vector<float> right);
+
+  /// svfloat64_t svaddp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FADDP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FADDP Zresult.D, Pg/M, Zresult.D, Zop2.D"
+  /// svfloat64_t svaddp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FADDP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FADDP Zresult.D, Pg/M, Zresult.D, Zop2.D"
+  public static unsafe Vector<double> AddPairwise(Vector<double> left, Vector<double> right);
+
+  /// svint8_t svaddp[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "ADDP Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; ADDP Zresult.B, Pg/M, Zresult.B, Zop2.B"
+  /// svint8_t svaddp[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "ADDP Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; ADDP Zresult.B, Pg/M, Zresult.B, Zop2.B"
+  public static unsafe Vector<sbyte> AddPairwise(Vector<sbyte> left, Vector<sbyte> right);
+
+  /// svint16_t svaddp[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "ADDP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; ADDP Zresult.H, Pg/M, Zresult.H, Zop2.H"
+  /// svint16_t svaddp[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "ADDP
Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; ADDP Zresult.H, Pg/M, Zresult.H, Zop2.H" + public static unsafe Vector AddPairwise(Vector left, Vector right); + + /// svint32_t svaddp[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "ADDP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; ADDP Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svaddp[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "ADDP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; ADDP Zresult.S, Pg/M, Zresult.S, Zop2.S" + public static unsafe Vector AddPairwise(Vector left, Vector right); + + /// svint64_t svaddp[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "ADDP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; ADDP Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svaddp[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "ADDP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; ADDP Zresult.D, Pg/M, Zresult.D, Zop2.D" + public static unsafe Vector AddPairwise(Vector left, Vector right); + + /// svuint8_t svaddp[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "ADDP Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; ADDP Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svaddp[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "ADDP Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; ADDP Zresult.B, Pg/M, Zresult.B, Zop2.B" + public static unsafe Vector AddPairwise(Vector left, Vector right); + + /// svuint16_t svaddp[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "ADDP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; ADDP Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svaddp[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "ADDP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; ADDP Zresult.H, Pg/M, Zresult.H, Zop2.H" + public static unsafe Vector AddPairwise(Vector left, Vector right); + + /// svuint32_t svaddp[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "ADDP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; ADDP Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svaddp[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "ADDP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; ADDP Zresult.S, Pg/M, Zresult.S, Zop2.S" + public static unsafe Vector AddPairwise(Vector left, Vector right); + + /// svuint64_t svaddp[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "ADDP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; ADDP Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svaddp[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "ADDP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; ADDP Zresult.D, Pg/M, Zresult.D, Zop2.D" + public static unsafe Vector AddPairwise(Vector left, Vector right); + + + /// AddPairwiseWidening : Add and accumulate long pairwise + + /// svint16_t svadalp[_s16]_m(svbool_t pg, svint16_t op1, svint8_t op2) : "SADALP Ztied1.H, Pg/M, Zop2.B" or "MOVPRFX Zresult, Zop1; SADALP Zresult.H, Pg/M, Zop2.B" + /// svint16_t svadalp[_s16]_x(svbool_t pg, svint16_t op1, svint8_t op2) : "SADALP Ztied1.H, Pg/M, Zop2.B" or "MOVPRFX Zresult, Zop1; SADALP Zresult.H, Pg/M, Zop2.B" + /// svint16_t svadalp[_s16]_z(svbool_t pg, svint16_t op1, svint8_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SADALP Zresult.H, Pg/M, Zop2.B" + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right); + + /// svint32_t svadalp[_s32]_m(svbool_t pg, svint32_t op1, svint16_t op2) : "SADALP Ztied1.S, Pg/M, 
Zop2.H" or "MOVPRFX Zresult, Zop1; SADALP Zresult.S, Pg/M, Zop2.H" + /// svint32_t svadalp[_s32]_x(svbool_t pg, svint32_t op1, svint16_t op2) : "SADALP Ztied1.S, Pg/M, Zop2.H" or "MOVPRFX Zresult, Zop1; SADALP Zresult.S, Pg/M, Zop2.H" + /// svint32_t svadalp[_s32]_z(svbool_t pg, svint32_t op1, svint16_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SADALP Zresult.S, Pg/M, Zop2.H" + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right); + + /// svint64_t svadalp[_s64]_m(svbool_t pg, svint64_t op1, svint32_t op2) : "SADALP Ztied1.D, Pg/M, Zop2.S" or "MOVPRFX Zresult, Zop1; SADALP Zresult.D, Pg/M, Zop2.S" + /// svint64_t svadalp[_s64]_x(svbool_t pg, svint64_t op1, svint32_t op2) : "SADALP Ztied1.D, Pg/M, Zop2.S" or "MOVPRFX Zresult, Zop1; SADALP Zresult.D, Pg/M, Zop2.S" + /// svint64_t svadalp[_s64]_z(svbool_t pg, svint64_t op1, svint32_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SADALP Zresult.D, Pg/M, Zop2.S" + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right); + + /// svuint16_t svadalp[_u16]_m(svbool_t pg, svuint16_t op1, svuint8_t op2) : "UADALP Ztied1.H, Pg/M, Zop2.B" or "MOVPRFX Zresult, Zop1; UADALP Zresult.H, Pg/M, Zop2.B" + /// svuint16_t svadalp[_u16]_x(svbool_t pg, svuint16_t op1, svuint8_t op2) : "UADALP Ztied1.H, Pg/M, Zop2.B" or "MOVPRFX Zresult, Zop1; UADALP Zresult.H, Pg/M, Zop2.B" + /// svuint16_t svadalp[_u16]_z(svbool_t pg, svuint16_t op1, svuint8_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; UADALP Zresult.H, Pg/M, Zop2.B" + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right); + + /// svuint32_t svadalp[_u32]_m(svbool_t pg, svuint32_t op1, svuint16_t op2) : "UADALP Ztied1.S, Pg/M, Zop2.H" or "MOVPRFX Zresult, Zop1; UADALP Zresult.S, Pg/M, Zop2.H" + /// svuint32_t svadalp[_u32]_x(svbool_t pg, svuint32_t op1, svuint16_t op2) : "UADALP Ztied1.S, Pg/M, Zop2.H" or "MOVPRFX Zresult, Zop1; UADALP Zresult.S, Pg/M, Zop2.H" + /// svuint32_t svadalp[_u32]_z(svbool_t pg, svuint32_t op1, svuint16_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; UADALP Zresult.S, Pg/M, Zop2.H" + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right); + + /// svuint64_t svadalp[_u64]_m(svbool_t pg, svuint64_t op1, svuint32_t op2) : "UADALP Ztied1.D, Pg/M, Zop2.S" or "MOVPRFX Zresult, Zop1; UADALP Zresult.D, Pg/M, Zop2.S" + /// svuint64_t svadalp[_u64]_x(svbool_t pg, svuint64_t op1, svuint32_t op2) : "UADALP Ztied1.D, Pg/M, Zop2.S" or "MOVPRFX Zresult, Zop1; UADALP Zresult.D, Pg/M, Zop2.S" + /// svuint64_t svadalp[_u64]_z(svbool_t pg, svuint64_t op1, svuint32_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; UADALP Zresult.D, Pg/M, Zop2.S" + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right); + + + /// AddSaturate : Saturating add + + /// svint8_t svqadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SQADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svqadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "SQADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "SQADD Zresult.B, Zop1.B, Zop2.B" + /// svint8_t svqadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SQADD Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; SQADD Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector AddSaturate(Vector left, Vector right); + + /// svint16_t svqadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or 
"MOVPRFX Zresult, Zop1; SQADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svqadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "SQADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "SQADD Zresult.H, Zop1.H, Zop2.H" + /// svint16_t svqadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SQADD Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; SQADD Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector AddSaturate(Vector left, Vector right); + + /// svint32_t svqadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SQADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svqadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SQADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "SQADD Zresult.S, Zop1.S, Zop2.S" + /// svint32_t svqadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SQADD Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SQADD Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector AddSaturate(Vector left, Vector right); + + /// svint64_t svqadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SQADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svqadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SQADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "SQADD Zresult.D, Zop1.D, Zop2.D" + /// svint64_t svqadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SQADD Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SQADD Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector AddSaturate(Vector left, Vector right); + + /// svuint8_t svqadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UQADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svqadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "UQADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "UQADD Zresult.B, Zop1.B, Zop2.B" + /// svuint8_t svqadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; UQADD Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; UQADD Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector AddSaturate(Vector left, Vector right); + + /// svuint16_t svqadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UQADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svqadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "UQADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "UQADD Zresult.H, Zop1.H, Zop2.H" + /// svuint16_t svqadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; UQADD Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; UQADD Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector AddSaturate(Vector left, Vector right); + + /// svuint32_t svqadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UQADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svqadd[_u32]_x(svbool_t pg, 
svuint32_t op1, svuint32_t op2) : "UQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "UQADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "UQADD Zresult.S, Zop1.S, Zop2.S" + /// svuint32_t svqadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; UQADD Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; UQADD Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector AddSaturate(Vector left, Vector right); + + /// svuint64_t svqadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UQADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svqadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "UQADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "UQADD Zresult.D, Zop1.D, Zop2.D" + /// svuint64_t svqadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; UQADD Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; UQADD Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector AddSaturate(Vector left, Vector right); + + + /// AddSaturateWithSignedAddend : Saturating add with signed addend + + /// svuint8_t svsqadd[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) : "USQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; USQADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svsqadd[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) : "USQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; USQADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svsqadd[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; USQADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + public static unsafe Vector AddSaturateWithSignedAddend(Vector left, Vector right); + + /// svuint16_t svsqadd[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2) : "USQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; USQADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svsqadd[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) : "USQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; USQADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svsqadd[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; USQADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + public static unsafe Vector AddSaturateWithSignedAddend(Vector left, Vector right); + + /// svuint32_t svsqadd[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) : "USQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; USQADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svsqadd[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) : "USQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; USQADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svsqadd[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; USQADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + public static unsafe Vector AddSaturateWithSignedAddend(Vector left, Vector right); + + /// svuint64_t svsqadd[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) : "USQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; USQADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svsqadd[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) : "USQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; USQADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svsqadd[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t 
op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; USQADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + public static unsafe Vector AddSaturateWithSignedAddend(Vector left, Vector right); + + + /// AddSaturateWithUnsignedAddend : Saturating add with unsigned addend + + /// svint8_t svuqadd[_s8]_m(svbool_t pg, svint8_t op1, svuint8_t op2) : "SUQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SUQADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svuqadd[_s8]_x(svbool_t pg, svint8_t op1, svuint8_t op2) : "SUQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SUQADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svuqadd[_s8]_z(svbool_t pg, svint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SUQADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, Vector right); + + /// svint16_t svuqadd[_s16]_m(svbool_t pg, svint16_t op1, svuint16_t op2) : "SUQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SUQADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svuqadd[_s16]_x(svbool_t pg, svint16_t op1, svuint16_t op2) : "SUQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SUQADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svuqadd[_s16]_z(svbool_t pg, svint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SUQADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, Vector right); + + /// svint32_t svuqadd[_s32]_m(svbool_t pg, svint32_t op1, svuint32_t op2) : "SUQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SUQADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svuqadd[_s32]_x(svbool_t pg, svint32_t op1, svuint32_t op2) : "SUQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SUQADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svuqadd[_s32]_z(svbool_t pg, svint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SUQADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, Vector right); + + /// svint64_t svuqadd[_s64]_m(svbool_t pg, svint64_t op1, svuint64_t op2) : "SUQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SUQADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svuqadd[_s64]_x(svbool_t pg, svint64_t op1, svuint64_t op2) : "SUQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SUQADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svuqadd[_s64]_z(svbool_t pg, svint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SUQADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, Vector right); + + + /// AddWideLower : Add wide (bottom) + + /// svint16_t svaddwb[_s16](svint16_t op1, svint8_t op2) : "SADDWB Zresult.H, Zop1.H, Zop2.B" + public static unsafe Vector AddWideLower(Vector left, Vector right); + + /// svint32_t svaddwb[_s32](svint32_t op1, svint16_t op2) : "SADDWB Zresult.S, Zop1.S, Zop2.H" + public static unsafe Vector AddWideLower(Vector left, Vector right); + + /// svint64_t svaddwb[_s64](svint64_t op1, svint32_t op2) : "SADDWB Zresult.D, Zop1.D, Zop2.S" + public static unsafe Vector AddWideLower(Vector left, Vector right); + + /// svuint16_t svaddwb[_u16](svuint16_t op1, svuint8_t op2) : "UADDWB Zresult.H, Zop1.H, Zop2.B" + public static unsafe Vector AddWideLower(Vector left, Vector right); + + /// svuint32_t svaddwb[_u32](svuint32_t op1, svuint16_t op2) : "UADDWB Zresult.S, Zop1.S, 
Zop2.H" + public static unsafe Vector AddWideLower(Vector left, Vector right); + + /// svuint64_t svaddwb[_u64](svuint64_t op1, svuint32_t op2) : "UADDWB Zresult.D, Zop1.D, Zop2.S" + public static unsafe Vector AddWideLower(Vector left, Vector right); + + + /// AddWideUpper : Add wide (top) + + /// svint16_t svaddwt[_s16](svint16_t op1, svint8_t op2) : "SADDWT Zresult.H, Zop1.H, Zop2.B" + public static unsafe Vector AddWideUpper(Vector left, Vector right); + + /// svint32_t svaddwt[_s32](svint32_t op1, svint16_t op2) : "SADDWT Zresult.S, Zop1.S, Zop2.H" + public static unsafe Vector AddWideUpper(Vector left, Vector right); + + /// svint64_t svaddwt[_s64](svint64_t op1, svint32_t op2) : "SADDWT Zresult.D, Zop1.D, Zop2.S" + public static unsafe Vector AddWideUpper(Vector left, Vector right); + + /// svuint16_t svaddwt[_u16](svuint16_t op1, svuint8_t op2) : "UADDWT Zresult.H, Zop1.H, Zop2.B" + public static unsafe Vector AddWideUpper(Vector left, Vector right); + + /// svuint32_t svaddwt[_u32](svuint32_t op1, svuint16_t op2) : "UADDWT Zresult.S, Zop1.S, Zop2.H" + public static unsafe Vector AddWideUpper(Vector left, Vector right); + + /// svuint64_t svaddwt[_u64](svuint64_t op1, svuint32_t op2) : "UADDWT Zresult.D, Zop1.D, Zop2.S" + public static unsafe Vector AddWideUpper(Vector left, Vector right); + + + /// AddWideningLower : Add long (bottom) + + /// svint16_t svaddlb[_s16](svint8_t op1, svint8_t op2) : "SADDLB Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector AddWideningLower(Vector left, Vector right); + + /// svint32_t svaddlb[_s32](svint16_t op1, svint16_t op2) : "SADDLB Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector AddWideningLower(Vector left, Vector right); + + /// svint64_t svaddlb[_s64](svint32_t op1, svint32_t op2) : "SADDLB Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector AddWideningLower(Vector left, Vector right); + + /// svuint16_t svaddlb[_u16](svuint8_t op1, svuint8_t op2) : "UADDLB Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector AddWideningLower(Vector left, Vector right); + + /// svuint32_t svaddlb[_u32](svuint16_t op1, svuint16_t op2) : "UADDLB Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector AddWideningLower(Vector left, Vector right); + + /// svuint64_t svaddlb[_u64](svuint32_t op1, svuint32_t op2) : "UADDLB Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector AddWideningLower(Vector left, Vector right); + + + /// AddWideningLowerUpper : Add long (bottom + top) + + /// svint16_t svaddlbt[_s16](svint8_t op1, svint8_t op2) : "SADDLBT Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector AddWideningLowerUpper(Vector left, Vector right); + + /// svint32_t svaddlbt[_s32](svint16_t op1, svint16_t op2) : "SADDLBT Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector AddWideningLowerUpper(Vector left, Vector right); + + /// svint64_t svaddlbt[_s64](svint32_t op1, svint32_t op2) : "SADDLBT Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector AddWideningLowerUpper(Vector left, Vector right); + + + /// AddWideningUpper : Add long (top) + + /// svint16_t svaddlt[_s16](svint8_t op1, svint8_t op2) : "SADDLT Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector AddWideningUpper(Vector left, Vector right); + + /// svint32_t svaddlt[_s32](svint16_t op1, svint16_t op2) : "SADDLT Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector AddWideningUpper(Vector left, Vector right); + + /// svint64_t svaddlt[_s64](svint32_t op1, svint32_t op2) : "SADDLT Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector 
+ /// AddWideningUpper : Add long (top) + + /// svint16_t svaddlt[_s16](svint8_t op1, svint8_t op2) : "SADDLT Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector AddWideningUpper(Vector left, Vector right); + + /// svint32_t svaddlt[_s32](svint16_t op1, svint16_t op2) : "SADDLT Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector AddWideningUpper(Vector left, Vector right); + + /// svint64_t svaddlt[_s64](svint32_t op1, svint32_t op2) : "SADDLT Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector AddWideningUpper(Vector left, Vector right); + + /// svuint16_t svaddlt[_u16](svuint8_t op1, svuint8_t op2) : "UADDLT Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector AddWideningUpper(Vector left, Vector right); + + /// svuint32_t svaddlt[_u32](svuint16_t op1, svuint16_t op2) : "UADDLT Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector AddWideningUpper(Vector left, Vector right); + + /// svuint64_t svaddlt[_u64](svuint32_t op1, svuint32_t op2) : "UADDLT Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector AddWideningUpper(Vector left, Vector right); + + + /// DotProductComplex : Complex dot product + + /// svint32_t svcdot[_s32](svint32_t op1, svint8_t op2, svint8_t op3, uint64_t imm_rotation) : "CDOT Ztied1.S, Zop2.B, Zop3.B, #imm_rotation" or "MOVPRFX Zresult, Zop1; CDOT Zresult.S, Zop2.B, Zop3.B, #imm_rotation" + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation); + + /// svint64_t svcdot[_s64](svint64_t op1, svint16_t op2, svint16_t op3, uint64_t imm_rotation) : "CDOT Ztied1.D, Zop2.H, Zop3.H, #imm_rotation" or "MOVPRFX Zresult, Zop1; CDOT Zresult.D, Zop2.H, Zop3.H, #imm_rotation" + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation); + + /// svint32_t svcdot_lane[_s32](svint32_t op1, svint8_t op2, svint8_t op3, uint64_t imm_index, uint64_t imm_rotation) : "CDOT Ztied1.S, Zop2.B, Zop3.B[imm_index], #imm_rotation" or "MOVPRFX Zresult, Zop1; CDOT Zresult.S, Zop2.B, Zop3.B[imm_index], #imm_rotation" + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation); + + /// svint64_t svcdot_lane[_s64](svint64_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index, uint64_t imm_rotation) : "CDOT Ztied1.D, Zop2.H, Zop3.H[imm_index], #imm_rotation" or "MOVPRFX Zresult, Zop1; CDOT Zresult.D, Zop2.H, Zop3.H[imm_index], #imm_rotation" + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation); + + + /// HalvingAdd : Halving add + + /// svint8_t svhadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SHADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SHADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svhadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SHADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "SHADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; SHADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svhadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SHADD Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; SHADD Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector HalvingAdd(Vector left, Vector right); + + /// svint16_t svhadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SHADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SHADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svhadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SHADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "SHADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; SHADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svhadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SHADD Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; SHADD Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector HalvingAdd(Vector
left, Vector right); + + /// svint32_t svhadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SHADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SHADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svhadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SHADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SHADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; SHADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svhadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SHADD Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SHADD Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector HalvingAdd(Vector left, Vector right); + + /// svint64_t svhadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SHADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SHADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svhadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SHADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SHADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; SHADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svhadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SHADD Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SHADD Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector HalvingAdd(Vector left, Vector right); + + /// svuint8_t svhadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UHADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UHADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svhadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UHADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "UHADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; UHADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svhadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; UHADD Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; UHADD Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector HalvingAdd(Vector left, Vector right); + + /// svuint16_t svhadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UHADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UHADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svhadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UHADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "UHADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; UHADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svhadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; UHADD Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; UHADD Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector HalvingAdd(Vector left, Vector right); + + /// svuint32_t svhadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UHADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UHADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svhadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UHADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "UHADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; UHADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svhadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; UHADD Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; UHADD Zresult.S, Pg/M, Zresult.S, 
Zop1.S" + public static unsafe Vector HalvingAdd(Vector left, Vector right); + + /// svuint64_t svhadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UHADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UHADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svhadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UHADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "UHADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; UHADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svhadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; UHADD Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; UHADD Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector HalvingAdd(Vector left, Vector right); + + + /// HalvingSubtract : Halving subtract + + /// svint8_t svhsub[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SHSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SHSUB Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svhsub[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SHSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "SHSUBR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; SHSUB Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svhsub[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SHSUB Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; SHSUBR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector HalvingSubtract(Vector left, Vector right); + + /// svint16_t svhsub[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SHSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SHSUB Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svhsub[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SHSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "SHSUBR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; SHSUB Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svhsub[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SHSUB Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; SHSUBR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector HalvingSubtract(Vector left, Vector right); + + /// svint32_t svhsub[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SHSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SHSUB Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svhsub[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SHSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SHSUBR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; SHSUB Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svhsub[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SHSUB Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SHSUBR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector HalvingSubtract(Vector left, Vector right); + + /// svint64_t svhsub[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SHSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SHSUB Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svhsub[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SHSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SHSUBR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; SHSUB Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svhsub[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SHSUB 
Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SHSUBR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector HalvingSubtract(Vector left, Vector right); + + /// svuint8_t svhsub[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UHSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UHSUB Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svhsub[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UHSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "UHSUBR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; UHSUB Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svhsub[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; UHSUB Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; UHSUBR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector HalvingSubtract(Vector left, Vector right); + + /// svuint16_t svhsub[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UHSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UHSUB Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svhsub[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UHSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "UHSUBR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; UHSUB Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svhsub[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; UHSUB Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; UHSUBR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector HalvingSubtract(Vector left, Vector right); + + /// svuint32_t svhsub[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UHSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UHSUB Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svhsub[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UHSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "UHSUBR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; UHSUB Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svhsub[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; UHSUB Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; UHSUBR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector HalvingSubtract(Vector left, Vector right); + + /// svuint64_t svhsub[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UHSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UHSUB Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svhsub[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UHSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "UHSUBR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; UHSUB Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svhsub[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; UHSUB Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; UHSUBR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector HalvingSubtract(Vector left, Vector right); + + + /// HalvingSubtractReversed : Halving subtract reversed + + /// svint8_t svhsubr[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SHSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SHSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svhsubr[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SHSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "SHSUB Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; 
SHSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svhsubr[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SHSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; SHSUB Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right); + + /// svint16_t svhsubr[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SHSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SHSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svhsubr[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SHSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "SHSUB Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; SHSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svhsubr[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SHSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; SHSUB Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right); + + /// svint32_t svhsubr[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SHSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SHSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svhsubr[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SHSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SHSUB Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; SHSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svhsubr[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SHSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SHSUB Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right); + + /// svint64_t svhsubr[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SHSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SHSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svhsubr[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SHSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SHSUB Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; SHSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svhsubr[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SHSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SHSUB Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right); + + /// svuint8_t svhsubr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UHSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UHSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svhsubr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UHSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "UHSUB Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; UHSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svhsubr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; UHSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; UHSUB Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right); + + /// svuint16_t svhsubr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UHSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UHSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svhsubr[_u16]_x(svbool_t pg, 
svuint16_t op1, svuint16_t op2) : "UHSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "UHSUB Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; UHSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svhsubr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; UHSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; UHSUB Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right); + + /// svuint32_t svhsubr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UHSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UHSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svhsubr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UHSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "UHSUB Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; UHSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svhsubr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; UHSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; UHSUB Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right); + + /// svuint64_t svhsubr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UHSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UHSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svhsubr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UHSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "UHSUB Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; UHSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svhsubr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; UHSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; UHSUB Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right); + + + /// MaxNumberPairwise : Maximum number pairwise + + /// svfloat32_t svmaxnmp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMAXNMP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMAXNMP Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svmaxnmp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMAXNMP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMAXNMP Zresult.S, Pg/M, Zresult.S, Zop2.S" + public static unsafe Vector MaxNumberPairwise(Vector left, Vector right); + + /// svfloat64_t svmaxnmp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMAXNMP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMAXNMP Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svmaxnmp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMAXNMP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMAXNMP Zresult.D, Pg/M, Zresult.D, Zop2.D" + public static unsafe Vector MaxNumberPairwise(Vector left, Vector right); + + + /// MaxPairwise : Maximum pairwise + + /// svfloat32_t svmaxp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMAXP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMAXP Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svmaxp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMAXP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMAXP Zresult.S, Pg/M, Zresult.S, Zop2.S" + public static unsafe Vector MaxPairwise(Vector left, Vector right); + + /// svfloat64_t svmaxp[_f64]_m(svbool_t pg, 
svfloat64_t op1, svfloat64_t op2) : "FMAXP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMAXP Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svmaxp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMAXP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMAXP Zresult.D, Pg/M, Zresult.D, Zop2.D" + public static unsafe Vector MaxPairwise(Vector left, Vector right); + + /// svint8_t svmaxp[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SMAXP Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SMAXP Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svmaxp[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SMAXP Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SMAXP Zresult.B, Pg/M, Zresult.B, Zop2.B" + public static unsafe Vector MaxPairwise(Vector left, Vector right); + + /// svint16_t svmaxp[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SMAXP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SMAXP Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svmaxp[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SMAXP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SMAXP Zresult.H, Pg/M, Zresult.H, Zop2.H" + public static unsafe Vector MaxPairwise(Vector left, Vector right); + + /// svint32_t svmaxp[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SMAXP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SMAXP Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svmaxp[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SMAXP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SMAXP Zresult.S, Pg/M, Zresult.S, Zop2.S" + public static unsafe Vector MaxPairwise(Vector left, Vector right); + + /// svint64_t svmaxp[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SMAXP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SMAXP Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svmaxp[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SMAXP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SMAXP Zresult.D, Pg/M, Zresult.D, Zop2.D" + public static unsafe Vector MaxPairwise(Vector left, Vector right); + + /// svuint8_t svmaxp[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UMAXP Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UMAXP Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svmaxp[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UMAXP Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UMAXP Zresult.B, Pg/M, Zresult.B, Zop2.B" + public static unsafe Vector MaxPairwise(Vector left, Vector right); + + /// svuint16_t svmaxp[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UMAXP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UMAXP Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svmaxp[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UMAXP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UMAXP Zresult.H, Pg/M, Zresult.H, Zop2.H" + public static unsafe Vector MaxPairwise(Vector left, Vector right); + + /// svuint32_t svmaxp[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UMAXP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UMAXP Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svmaxp[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UMAXP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UMAXP Zresult.S, Pg/M, Zresult.S, Zop2.S" + public static unsafe Vector MaxPairwise(Vector left, Vector right); + + /// svuint64_t 
svmaxp[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UMAXP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UMAXP Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svmaxp[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UMAXP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UMAXP Zresult.D, Pg/M, Zresult.D, Zop2.D" + public static unsafe Vector MaxPairwise(Vector left, Vector right); + + + /// MinNumberPairwise : Minimum number pairwise + + /// svfloat32_t svminnmp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMINNMP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMINNMP Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svminnmp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMINNMP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMINNMP Zresult.S, Pg/M, Zresult.S, Zop2.S" + public static unsafe Vector MinNumberPairwise(Vector left, Vector right); + + /// svfloat64_t svminnmp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMINNMP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMINNMP Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svminnmp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMINNMP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMINNMP Zresult.D, Pg/M, Zresult.D, Zop2.D" + public static unsafe Vector MinNumberPairwise(Vector left, Vector right); + + + /// MinPairwise : Minimum pairwise + + /// svfloat32_t svminp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMINP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMINP Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svminp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMINP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMINP Zresult.S, Pg/M, Zresult.S, Zop2.S" + public static unsafe Vector MinPairwise(Vector left, Vector right); + + /// svfloat64_t svminp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMINP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMINP Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svminp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMINP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMINP Zresult.D, Pg/M, Zresult.D, Zop2.D" + public static unsafe Vector MinPairwise(Vector left, Vector right); + + /// svint8_t svminp[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SMINP Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SMINP Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svminp[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SMINP Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SMINP Zresult.B, Pg/M, Zresult.B, Zop2.B" + public static unsafe Vector MinPairwise(Vector left, Vector right); + + /// svint16_t svminp[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SMINP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SMINP Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svminp[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SMINP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SMINP Zresult.H, Pg/M, Zresult.H, Zop2.H" + public static unsafe Vector MinPairwise(Vector left, Vector right); + + /// svint32_t svminp[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SMINP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SMINP Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svminp[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SMINP Ztied1.S, Pg/M, 
Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SMINP Zresult.S, Pg/M, Zresult.S, Zop2.S" + public static unsafe Vector MinPairwise(Vector left, Vector right); + + /// svint64_t svminp[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SMINP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SMINP Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svminp[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SMINP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SMINP Zresult.D, Pg/M, Zresult.D, Zop2.D" + public static unsafe Vector MinPairwise(Vector left, Vector right); + + /// svuint8_t svminp[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UMINP Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UMINP Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svminp[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UMINP Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UMINP Zresult.B, Pg/M, Zresult.B, Zop2.B" + public static unsafe Vector MinPairwise(Vector left, Vector right); + + /// svuint16_t svminp[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UMINP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UMINP Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svminp[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UMINP Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UMINP Zresult.H, Pg/M, Zresult.H, Zop2.H" + public static unsafe Vector MinPairwise(Vector left, Vector right); + + /// svuint32_t svminp[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UMINP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UMINP Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svminp[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UMINP Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UMINP Zresult.S, Pg/M, Zresult.S, Zop2.S" + public static unsafe Vector MinPairwise(Vector left, Vector right); + + /// svuint64_t svminp[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UMINP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UMINP Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svminp[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UMINP Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UMINP Zresult.D, Pg/M, Zresult.D, Zop2.D" + public static unsafe Vector MinPairwise(Vector left, Vector right); + + + /// MultiplyAddBySelectedScalar : Multiply-add, addend first + + /// svint16_t svmla_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) : "MLA Ztied1.H, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; MLA Zresult.H, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svint32_t svmla_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) : "MLA Ztied1.S, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; MLA Zresult.S, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svint64_t svmla_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index) : "MLA Ztied1.D, Zop2.D, Zop3.D[imm_index]" or "MOVPRFX Zresult, Zop1; MLA Zresult.D, Zop2.D, Zop3.D[imm_index]" + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svuint16_t 
svmla_lane[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) : "MLA Ztied1.H, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; MLA Zresult.H, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svuint32_t svmla_lane[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) : "MLA Ztied1.S, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; MLA Zresult.S, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svuint64_t svmla_lane[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3, uint64_t imm_index) : "MLA Ztied1.D, Zop2.D, Zop3.D[imm_index]" or "MOVPRFX Zresult, Zop1; MLA Zresult.D, Zop2.D, Zop3.D[imm_index]" + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + + /// MultiplyAddWideningLower : Multiply-add long (bottom) + + /// svint16_t svmlalb[_s16](svint16_t op1, svint8_t op2, svint8_t op3) : "SMLALB Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SMLALB Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3); + + /// svint32_t svmlalb[_s32](svint32_t op1, svint16_t op2, svint16_t op3) : "SMLALB Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SMLALB Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3); + + /// svint64_t svmlalb[_s64](svint64_t op1, svint32_t op2, svint32_t op3) : "SMLALB Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SMLALB Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3); + + /// svuint16_t svmlalb[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) : "UMLALB Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; UMLALB Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3); + + /// svuint32_t svmlalb[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) : "UMLALB Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; UMLALB Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3); + + /// svuint64_t svmlalb[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) : "UMLALB Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; UMLALB Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3); + + /// svint32_t svmlalb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) : "SMLALB Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; SMLALB Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svint64_t svmlalb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) : "SMLALB Ztied1.D, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; SMLALB Zresult.D, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svuint32_t svmlalb_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) : "UMLALB Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; 
UMLALB Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svuint64_t svmlalb_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) : "UMLALB Ztied1.D, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; UMLALB Zresult.D, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// MultiplyAddWideningUpper : Multiply-add long (top) + + /// svint16_t svmlalt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) : "SMLALT Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SMLALT Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svint32_t svmlalt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) : "SMLALT Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SMLALT Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svint64_t svmlalt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) : "SMLALT Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SMLALT Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svuint16_t svmlalt[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) : "UMLALT Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; UMLALT Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svuint32_t svmlalt[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) : "UMLALT Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; UMLALT Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svuint64_t svmlalt[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) : "UMLALT Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; UMLALT Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svint32_t svmlalt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) : "SMLALT Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; SMLALT Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svint64_t svmlalt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) : "SMLALT Ztied1.D, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; SMLALT Zresult.D, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svuint32_t svmlalt_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) : "UMLALT Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; UMLALT Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svuint64_t svmlalt_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) : "UMLALT Ztied1.D, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; UMLALT Zresult.D, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + +
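+        // Illustrative usage sketch (not part of the generated surface; assumes a
+        // Sve2 class and the Vector<T> element types from the doc comments).
+        // SMLALB accumulates the products of the even (bottom) narrow lanes and
+        // SMLALT those of the odd (top) lanes, so chaining the two gives a full
+        // widening multiply-accumulate over every lane of the narrow inputs:
+        //
+        //   static Vector<int> WideningMac(Vector<int> acc, Vector<short> a, Vector<short> b)
+        //   {
+        //       acc = Sve2.MultiplyAddWideningLower(acc, a, b);  // SMLALB: even lanes
+        //       return Sve2.MultiplyAddWideningUpper(acc, a, b); // SMLALT: odd lanes
+        //   }
+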
+ /// MultiplyBySelectedScalar : Multiply + + /// svint16_t svmul_lane[_s16](svint16_t op1, svint16_t op2, uint64_t imm_index) : "MUL Zresult.H, Zop1.H, Zop2.H[imm_index]" + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svint32_t svmul_lane[_s32](svint32_t op1, svint32_t op2, uint64_t imm_index) : "MUL Zresult.S, Zop1.S, Zop2.S[imm_index]" + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svint64_t svmul_lane[_s64](svint64_t op1, svint64_t op2, uint64_t imm_index) : "MUL Zresult.D, Zop1.D, Zop2.D[imm_index]" + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svuint16_t svmul_lane[_u16](svuint16_t op1, svuint16_t op2, uint64_t imm_index) : "MUL Zresult.H, Zop1.H, Zop2.H[imm_index]" + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svuint32_t svmul_lane[_u32](svuint32_t op1, svuint32_t op2, uint64_t imm_index) : "MUL Zresult.S, Zop1.S, Zop2.S[imm_index]" + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svuint64_t svmul_lane[_u64](svuint64_t op1, svuint64_t op2, uint64_t imm_index) : "MUL Zresult.D, Zop1.D, Zop2.D[imm_index]" + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); + + + /// MultiplySubtractBySelectedScalar : Multiply-subtract, minuend first + + /// svint16_t svmls_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) : "MLS Ztied1.H, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; MLS Zresult.H, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svint32_t svmls_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) : "MLS Ztied1.S, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; MLS Zresult.S, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svint64_t svmls_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index) : "MLS Ztied1.D, Zop2.D, Zop3.D[imm_index]" or "MOVPRFX Zresult, Zop1; MLS Zresult.D, Zop2.D, Zop3.D[imm_index]" + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svuint16_t svmls_lane[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) : "MLS Ztied1.H, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; MLS Zresult.H, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svuint32_t svmls_lane[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) : "MLS Ztied1.S, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; MLS Zresult.S, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svuint64_t svmls_lane[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3, uint64_t imm_index) : "MLS Ztied1.D, Zop2.D,
Zop3.D[imm_index]" + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + + /// MultiplySubtractWideningLower : Multiply-subtract long (bottom) + + /// svint16_t svmlslb[_s16](svint16_t op1, svint8_t op2, svint8_t op3) : "SMLSLB Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SMLSLB Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3); + + /// svint32_t svmlslb[_s32](svint32_t op1, svint16_t op2, svint16_t op3) : "SMLSLB Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SMLSLB Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3); + + /// svint64_t svmlslb[_s64](svint64_t op1, svint32_t op2, svint32_t op3) : "SMLSLB Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SMLSLB Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3); + + /// svuint16_t svmlslb[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) : "UMLSLB Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; UMLSLB Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3); + + /// svuint32_t svmlslb[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) : "UMLSLB Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; UMLSLB Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3); + + /// svuint64_t svmlslb[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) : "UMLSLB Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; UMLSLB Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3); + + /// svint32_t svmlslb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) : "SMLSLB Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; SMLSLB Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svint64_t svmlslb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) : "SMLSLB Ztied1.D, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; SMLSLB Zresult.D, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svuint32_t svmlslb_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) : "UMLSLB Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; UMLSLB Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svuint64_t svmlslb_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) : "UMLSLB Ztied1.D, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; UMLSLB Zresult.D, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// MultiplySubtractWideningUpper : Multiply-subtract long (top) + + /// svint16_t svmlslt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) : "SMLSLT Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SMLSLT Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector 
op3); + + /// svint32_t svmlslt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) : "SMLSLT Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SMLSLT Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svint64_t svmlslt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) : "SMLSLT Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SMLSLT Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svuint16_t svmlslt[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) : "UMLSLT Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; UMLSLT Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svuint32_t svmlslt[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) : "UMLSLT Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; UMLSLT Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svuint64_t svmlslt[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) : "UMLSLT Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; UMLSLT Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svint32_t svmlslt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) : "SMLSLT Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; SMLSLT Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svint64_t svmlslt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) : "SMLSLT Ztied1.D, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; SMLSLT Zresult.D, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svuint32_t svmlslt_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) : "UMLSLT Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; UMLSLT Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svuint64_t svmlslt_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) : "UMLSLT Ztied1.D, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; UMLSLT Zresult.D, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// MultiplyWideningLower : Multiply long (bottom) + + /// svint16_t svmullb[_s16](svint8_t op1, svint8_t op2) : "SMULLB Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right); + + /// svint32_t svmullb[_s32](svint16_t op1, svint16_t op2) : "SMULLB Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right); + + /// svint64_t svmullb[_s64](svint32_t op1, svint32_t op2) : "SMULLB Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right); + + /// svuint16_t svmullb[_u16](svuint8_t op1, svuint8_t op2) : "UMULLB Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right); + + /// svuint32_t svmullb[_u32](svuint16_t op1, svuint16_t 
op2) : "UMULLB Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right); + + /// svuint64_t svmullb[_u64](svuint32_t op1, svuint32_t op2) : "UMULLB Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right); + + /// svint32_t svmullb_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index) : "SMULLB Zresult.S, Zop1.H, Zop2.H[imm_index]" + public static unsafe Vector MultiplyWideningLower(Vector op1, Vector op2, ulong imm_index); + + /// svint64_t svmullb_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index) : "SMULLB Zresult.D, Zop1.S, Zop2.S[imm_index]" + public static unsafe Vector MultiplyWideningLower(Vector op1, Vector op2, ulong imm_index); + + /// svuint32_t svmullb_lane[_u32](svuint16_t op1, svuint16_t op2, uint64_t imm_index) : "UMULLB Zresult.S, Zop1.H, Zop2.H[imm_index]" + public static unsafe Vector MultiplyWideningLower(Vector op1, Vector op2, ulong imm_index); + + /// svuint64_t svmullb_lane[_u64](svuint32_t op1, svuint32_t op2, uint64_t imm_index) : "UMULLB Zresult.D, Zop1.S, Zop2.S[imm_index]" + public static unsafe Vector MultiplyWideningLower(Vector op1, Vector op2, ulong imm_index); + + + /// MultiplyWideningUpper : Multiply long (top) + + /// svint16_t svmullt[_s16](svint8_t op1, svint8_t op2) : "SMULLT Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right); + + /// svint32_t svmullt[_s32](svint16_t op1, svint16_t op2) : "SMULLT Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right); + + /// svint64_t svmullt[_s64](svint32_t op1, svint32_t op2) : "SMULLT Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right); + + /// svuint16_t svmullt[_u16](svuint8_t op1, svuint8_t op2) : "UMULLT Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right); + + /// svuint32_t svmullt[_u32](svuint16_t op1, svuint16_t op2) : "UMULLT Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right); + + /// svuint64_t svmullt[_u64](svuint32_t op1, svuint32_t op2) : "UMULLT Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right); + + /// svint32_t svmullt_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index) : "SMULLT Zresult.S, Zop1.H, Zop2.H[imm_index]" + public static unsafe Vector MultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index); + + /// svint64_t svmullt_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index) : "SMULLT Zresult.D, Zop1.S, Zop2.S[imm_index]" + public static unsafe Vector MultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index); + + /// svuint32_t svmullt_lane[_u32](svuint16_t op1, svuint16_t op2, uint64_t imm_index) : "UMULLT Zresult.S, Zop1.H, Zop2.H[imm_index]" + public static unsafe Vector MultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index); + + /// svuint64_t svmullt_lane[_u64](svuint32_t op1, svuint32_t op2, uint64_t imm_index) : "UMULLT Zresult.D, Zop1.S, Zop2.S[imm_index]" + public static unsafe Vector MultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index); + + + /// PolynomialMultiply : Polynomial multiply + + /// svuint8_t svpmul[_u8](svuint8_t op1, svuint8_t op2) : "PMUL Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector PolynomialMultiply(Vector left, Vector right); + + + /// PolynomialMultiplyWideningLower : 
+ /// PolynomialMultiplyWideningLower : Polynomial multiply long (bottom) + + /// svuint16_t svpmullb[_u16](svuint8_t op1, svuint8_t op2) : "PMULLB Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, Vector right); + + /// svuint64_t svpmullb[_u64](svuint32_t op1, svuint32_t op2) : "PMULLB Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, Vector right); + + /// svuint8_t svpmullb_pair[_u8](svuint8_t op1, svuint8_t op2) : "PMULLB Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, Vector right); + + /// svuint32_t svpmullb_pair[_u32](svuint32_t op1, svuint32_t op2) : "PMULLB Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, Vector right); + + + /// PolynomialMultiplyWideningUpper : Polynomial multiply long (top) + + /// svuint16_t svpmullt[_u16](svuint8_t op1, svuint8_t op2) : "PMULLT Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, Vector right); + + /// svuint64_t svpmullt[_u64](svuint32_t op1, svuint32_t op2) : "PMULLT Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, Vector right); + + /// svuint8_t svpmullt_pair[_u8](svuint8_t op1, svuint8_t op2) : "PMULLT Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, Vector right); + + /// svuint32_t svpmullt_pair[_u32](svuint32_t op1, svuint32_t op2) : "PMULLT Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, Vector right); + + + /// RoundingAddHighNarrowingLower : Rounding add narrow high part (bottom) + + /// svint8_t svraddhnb[_s16](svint16_t op1, svint16_t op2) : "RADDHNB Zresult.B, Zop1.H, Zop2.H" + public static unsafe Vector RoundingAddHighNarrowingLower(Vector left, Vector right); + + /// svint16_t svraddhnb[_s32](svint32_t op1, svint32_t op2) : "RADDHNB Zresult.H, Zop1.S, Zop2.S" + public static unsafe Vector RoundingAddHighNarrowingLower(Vector left, Vector right); + + /// svint32_t svraddhnb[_s64](svint64_t op1, svint64_t op2) : "RADDHNB Zresult.S, Zop1.D, Zop2.D" + public static unsafe Vector RoundingAddHighNarrowingLower(Vector left, Vector right); + + /// svuint8_t svraddhnb[_u16](svuint16_t op1, svuint16_t op2) : "RADDHNB Zresult.B, Zop1.H, Zop2.H" + public static unsafe Vector RoundingAddHighNarrowingLower(Vector left, Vector right); + + /// svuint16_t svraddhnb[_u32](svuint32_t op1, svuint32_t op2) : "RADDHNB Zresult.H, Zop1.S, Zop2.S" + public static unsafe Vector RoundingAddHighNarrowingLower(Vector left, Vector right); + + /// svuint32_t svraddhnb[_u64](svuint64_t op1, svuint64_t op2) : "RADDHNB Zresult.S, Zop1.D, Zop2.D" + public static unsafe Vector RoundingAddHighNarrowingLower(Vector left, Vector right); + +
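+ // NOTE (editorial sketch, not part of the generated listing): RADDHNB adds each pair of wide
+ // elements, rounds, and keeps only the high half, writing it to the even (bottom) elements of
+ // the narrower result; the odd elements are zeroed. Sve2 as the containing class is assumed:
+ //
+ //     // per ushort element: (byte)((a[i] + b[i] + 0x80) >> 8),
+ //     // e.g. (0x1234 + 0x0FCC + 0x80) >> 8 == 0x22
+ //     Vector<byte> lo = Sve2.RoundingAddHighNarrowingLower(a, b);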
+ /// RoundingAddHighNarrowingUpper : Rounding add narrow high part (top) + + /// svint8_t svraddhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2) : "RADDHNT Ztied.B, Zop1.H, Zop2.H" + public static unsafe Vector RoundingAddHighNarrowingUpper(Vector even, Vector left, Vector right); + + /// svint16_t svraddhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2) : "RADDHNT Ztied.H, Zop1.S, Zop2.S" + public static unsafe Vector RoundingAddHighNarrowingUpper(Vector even, Vector left, Vector right); + + /// svint32_t svraddhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2) : "RADDHNT Ztied.S, Zop1.D, Zop2.D" + public static unsafe Vector RoundingAddHighNarrowingUpper(Vector even, Vector left, Vector right); + + /// svuint8_t svraddhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2) : "RADDHNT Ztied.B, Zop1.H, Zop2.H" + public static unsafe Vector RoundingAddHighNarrowingUpper(Vector even, Vector left, Vector right); + + /// svuint16_t svraddhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2) : "RADDHNT Ztied.H, Zop1.S, Zop2.S" + public static unsafe Vector RoundingAddHighNarrowingUpper(Vector even, Vector left, Vector right); + + /// svuint32_t svraddhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2) : "RADDHNT Ztied.S, Zop1.D, Zop2.D" + public static unsafe Vector RoundingAddHighNarrowingUpper(Vector even, Vector left, Vector right); + +
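+ // NOTE (editorial sketch, not part of the generated listing): the (bottom)/(top) forms pair up.
+ // RADDHNB fills the even elements and zeroes the odds; RADDHNT then merges results into the odd
+ // elements of that same vector, so two wide additions narrow into one full-width result. Sve2 as
+ // the containing class is assumed:
+ //
+ //     Vector<byte> r = Sve2.RoundingAddHighNarrowingLower(a0, b0); // even elements
+ //     r = Sve2.RoundingAddHighNarrowingUpper(r, a1, b1);           // odd elements merged in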
or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SRHADD Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector RoundingHalvingAdd(Vector left, Vector right); + + /// svuint8_t svrhadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "URHADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; URHADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svrhadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "URHADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "URHADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; URHADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svrhadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; URHADD Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; URHADD Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector RoundingHalvingAdd(Vector left, Vector right); + + /// svuint16_t svrhadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "URHADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; URHADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svrhadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "URHADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "URHADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; URHADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svrhadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; URHADD Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; URHADD Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector RoundingHalvingAdd(Vector left, Vector right); + + /// svuint32_t svrhadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "URHADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; URHADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svrhadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "URHADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "URHADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; URHADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svrhadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; URHADD Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; URHADD Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector RoundingHalvingAdd(Vector left, Vector right); + + /// svuint64_t svrhadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "URHADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; URHADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svrhadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "URHADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "URHADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; URHADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svrhadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; URHADD Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; URHADD Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector RoundingHalvingAdd(Vector left, Vector right); + + + /// RoundingSubtractHighNarowingLower : Rounding subtract narrow high part (bottom) + + /// svint8_t svrsubhnb[_s16](svint16_t op1, svint16_t op2) : "RSUBHNB Zresult.B, Zop1.H, Zop2.H" + public static unsafe Vector RoundingSubtractHighNarowingLower(Vector left, Vector right); + + /// svint16_t svrsubhnb[_s32](svint32_t op1, svint32_t op2) : "RSUBHNB Zresult.H, Zop1.S, Zop2.S" + public static unsafe Vector 
+ /// RoundingSubtractHighNarrowingLower : Rounding subtract narrow high part (bottom) + + /// svint8_t svrsubhnb[_s16](svint16_t op1, svint16_t op2) : "RSUBHNB Zresult.B, Zop1.H, Zop2.H" + public static unsafe Vector RoundingSubtractHighNarrowingLower(Vector left, Vector right); + + /// svint16_t svrsubhnb[_s32](svint32_t op1, svint32_t op2) : "RSUBHNB Zresult.H, Zop1.S, Zop2.S" + public static unsafe Vector RoundingSubtractHighNarrowingLower(Vector left, Vector right); + + /// svint32_t svrsubhnb[_s64](svint64_t op1, svint64_t op2) : "RSUBHNB Zresult.S, Zop1.D, Zop2.D" + public static unsafe Vector RoundingSubtractHighNarrowingLower(Vector left, Vector right); + + /// svuint8_t svrsubhnb[_u16](svuint16_t op1, svuint16_t op2) : "RSUBHNB Zresult.B, Zop1.H, Zop2.H" + public static unsafe Vector RoundingSubtractHighNarrowingLower(Vector left, Vector right); + + /// svuint16_t svrsubhnb[_u32](svuint32_t op1, svuint32_t op2) : "RSUBHNB Zresult.H, Zop1.S, Zop2.S" + public static unsafe Vector RoundingSubtractHighNarrowingLower(Vector left, Vector right); + + /// svuint32_t svrsubhnb[_u64](svuint64_t op1, svuint64_t op2) : "RSUBHNB Zresult.S, Zop1.D, Zop2.D" + public static unsafe Vector RoundingSubtractHighNarrowingLower(Vector left, Vector right); + + + /// RoundingSubtractHighNarrowingUpper : Rounding subtract narrow high part (top) + + /// svint8_t svrsubhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2) : "RSUBHNT Ztied.B, Zop1.H, Zop2.H" + public static unsafe Vector RoundingSubtractHighNarrowingUpper(Vector even, Vector left, Vector right); + + /// svint16_t svrsubhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2) : "RSUBHNT Ztied.H, Zop1.S, Zop2.S" + public static unsafe Vector RoundingSubtractHighNarrowingUpper(Vector even, Vector left, Vector right); + + /// svint32_t svrsubhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2) : "RSUBHNT Ztied.S, Zop1.D, Zop2.D" + public static unsafe Vector RoundingSubtractHighNarrowingUpper(Vector even, Vector left, Vector right); + + /// svuint8_t svrsubhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2) : "RSUBHNT Ztied.B, Zop1.H, Zop2.H" + public static unsafe Vector RoundingSubtractHighNarrowingUpper(Vector even, Vector left, Vector right); + + /// svuint16_t svrsubhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2) : "RSUBHNT Ztied.H, Zop1.S, Zop2.S" + public static unsafe Vector RoundingSubtractHighNarrowingUpper(Vector even, Vector left, Vector right); + + /// svuint32_t svrsubhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2) : "RSUBHNT Ztied.S, Zop1.D, Zop2.D" + public static unsafe Vector RoundingSubtractHighNarrowingUpper(Vector even, Vector left, Vector right); + + + /// SaturatingAbs : Saturating absolute value + + /// svint8_t svqabs[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) : "SQABS Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; SQABS Zresult.B, Pg/M, Zop.B" + /// svint8_t svqabs[_s8]_x(svbool_t pg, svint8_t op) : "SQABS Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; SQABS Zresult.B, Pg/M, Zop.B" + /// svint8_t svqabs[_s8]_z(svbool_t pg, svint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; SQABS Zresult.B, Pg/M, Zop.B" + public static unsafe Vector SaturatingAbs(Vector value); + + /// svint16_t svqabs[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) : "SQABS Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; SQABS Zresult.H, Pg/M, Zop.H" + /// svint16_t svqabs[_s16]_x(svbool_t pg, svint16_t op) : "SQABS Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; SQABS Zresult.H, Pg/M, Zop.H" + /// svint16_t svqabs[_s16]_z(svbool_t pg, svint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; SQABS Zresult.H, Pg/M, Zop.H" + public static unsafe Vector SaturatingAbs(Vector value); + + /// svint32_t svqabs[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) : "SQABS Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; SQABS Zresult.S, Pg/M, Zop.S" + /// svint32_t svqabs[_s32]_x(svbool_t pg, svint32_t op) : "SQABS
Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; SQABS Zresult.S, Pg/M, Zop.S" + /// svint32_t svqabs[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; SQABS Zresult.S, Pg/M, Zop.S" + public static unsafe Vector SaturatingAbs(Vector value); + + /// svint64_t svqabs[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) : "SQABS Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; SQABS Zresult.D, Pg/M, Zop.D" + /// svint64_t svqabs[_s64]_x(svbool_t pg, svint64_t op) : "SQABS Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; SQABS Zresult.D, Pg/M, Zop.D" + /// svint64_t svqabs[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; SQABS Zresult.D, Pg/M, Zop.D" + public static unsafe Vector SaturatingAbs(Vector value); + + + /// SaturatingDoublingMultiplyAddWideningLower : Saturating doubling multiply-add long (bottom) + + /// svint16_t svqdmlalb[_s16](svint16_t op1, svint8_t op2, svint8_t op3) : "SQDMLALB Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SQDMLALB Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLower(Vector op1, Vector op2, Vector op3); + + /// svint32_t svqdmlalb[_s32](svint32_t op1, svint16_t op2, svint16_t op3) : "SQDMLALB Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SQDMLALB Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLower(Vector op1, Vector op2, Vector op3); + + /// svint64_t svqdmlalb[_s64](svint64_t op1, svint32_t op2, svint32_t op3) : "SQDMLALB Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SQDMLALB Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLower(Vector op1, Vector op2, Vector op3); + + /// svint32_t svqdmlalb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) : "SQDMLALB Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; SQDMLALB Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svint64_t svqdmlalb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) : "SQDMLALB Ztied1.D, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; SQDMLALB Zresult.D, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// SaturatingDoublingMultiplyAddWideningLowerUpper : Saturating doubling multiply-add long (bottom × top) + + /// svint16_t svqdmlalbt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) : "SQDMLALBT Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SQDMLALBT Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLowerUpper(Vector op1, Vector op2, Vector op3); + + /// svint32_t svqdmlalbt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) : "SQDMLALBT Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SQDMLALBT Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLowerUpper(Vector op1, Vector op2, Vector op3); + + /// svint64_t svqdmlalbt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) : "SQDMLALBT Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SQDMLALBT Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLowerUpper(Vector op1, Vector op2, Vector op3); + + + /// SaturatingDoublingMultiplyAddWideningUpper : Saturating doubling multiply-add long (top) + + /// 
svint16_t svqdmlalt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) : "SQDMLALT Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SQDMLALT Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector SaturatingDoublingMultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svint32_t svqdmlalt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) : "SQDMLALT Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SQDMLALT Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector SaturatingDoublingMultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svint64_t svqdmlalt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) : "SQDMLALT Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SQDMLALT Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector SaturatingDoublingMultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svint32_t svqdmlalt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) : "SQDMLALT Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; SQDMLALT Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector SaturatingDoublingMultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svint64_t svqdmlalt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) : "SQDMLALT Ztied1.D, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; SQDMLALT Zresult.D, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector SaturatingDoublingMultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// SaturatingDoublingMultiplyHigh : Saturating doubling multiply high + + /// svint8_t svqdmulh[_s8](svint8_t op1, svint8_t op2) : "SQDMULH Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector left, Vector right); + + /// svint16_t svqdmulh[_s16](svint16_t op1, svint16_t op2) : "SQDMULH Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector left, Vector right); + + /// svint32_t svqdmulh[_s32](svint32_t op1, svint32_t op2) : "SQDMULH Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector left, Vector right); + + /// svint64_t svqdmulh[_s64](svint64_t op1, svint64_t op2) : "SQDMULH Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector left, Vector right); + + /// svint16_t svqdmulh_lane[_s16](svint16_t op1, svint16_t op2, uint64_t imm_index) : "SQDMULH Zresult.H, Zop1.H, Zop2.H[imm_index]" + public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector op1, Vector op2, ulong imm_index); + + /// svint32_t svqdmulh_lane[_s32](svint32_t op1, svint32_t op2, uint64_t imm_index) : "SQDMULH Zresult.S, Zop1.S, Zop2.S[imm_index]" + public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector op1, Vector op2, ulong imm_index); + + /// svint64_t svqdmulh_lane[_s64](svint64_t op1, svint64_t op2, uint64_t imm_index) : "SQDMULH Zresult.D, Zop1.D, Zop2.D[imm_index]" + public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector op1, Vector op2, ulong imm_index); + + + /// SaturatingDoublingMultiplySubtractWideningLower : Saturating doubling multiply-subtract long (bottom) + + /// svint16_t svqdmlslb[_s16](svint16_t op1, svint8_t op2, svint8_t op3) : "SQDMLSLB Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SQDMLSLB Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3); + + /// svint32_t 
svqdmlslb[_s32](svint32_t op1, svint16_t op2, svint16_t op3) : "SQDMLSLB Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SQDMLSLB Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3); + + /// svint64_t svqdmlslb[_s64](svint64_t op1, svint32_t op2, svint32_t op3) : "SQDMLSLB Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SQDMLSLB Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3); + + /// svint32_t svqdmlslb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) : "SQDMLSLB Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; SQDMLSLB Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svint64_t svqdmlslb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) : "SQDMLSLB Ztied1.D, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; SQDMLSLB Zresult.D, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// SaturatingDoublingMultiplySubtractWideningLowerUpper : Saturating doubling multiply-subtract long (bottom × top) + + /// svint16_t svqdmlslbt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) : "SQDMLSLBT Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SQDMLSLBT Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector op1, Vector op2, Vector op3); + + /// svint32_t svqdmlslbt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) : "SQDMLSLBT Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SQDMLSLBT Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector op1, Vector op2, Vector op3); + + /// svint64_t svqdmlslbt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) : "SQDMLSLBT Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SQDMLSLBT Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector op1, Vector op2, Vector op3); + + + /// SaturatingDoublingMultiplySubtractWideningUpper : Saturating doubling multiply-subtract long (top) + + /// svint16_t svqdmlslt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) : "SQDMLSLT Ztied1.H, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SQDMLSLT Zresult.H, Zop2.B, Zop3.B" + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svint32_t svqdmlslt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) : "SQDMLSLT Ztied1.S, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SQDMLSLT Zresult.S, Zop2.H, Zop3.H" + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svint64_t svqdmlslt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) : "SQDMLSLT Ztied1.D, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SQDMLSLT Zresult.D, Zop2.S, Zop3.S" + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svint32_t svqdmlslt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) : "SQDMLSLT Ztied1.S, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; SQDMLSLT Zresult.S, Zop2.H, Zop3.H[imm_index]" + public static 
unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svint64_t svqdmlslt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) : "SQDMLSLT Ztied1.D, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; SQDMLSLT Zresult.D, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// SaturatingDoublingMultiplyWideningLower : Saturating doubling multiply long (bottom) + + /// svint16_t svqdmullb[_s16](svint8_t op1, svint8_t op2) : "SQDMULLB Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector left, Vector right); + + /// svint32_t svqdmullb[_s32](svint16_t op1, svint16_t op2) : "SQDMULLB Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector left, Vector right); + + /// svint64_t svqdmullb[_s64](svint32_t op1, svint32_t op2) : "SQDMULLB Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector left, Vector right); + + /// svint32_t svqdmullb_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index) : "SQDMULLB Zresult.S, Zop1.H, Zop2.H[imm_index]" + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector op1, Vector op2, ulong imm_index); + + /// svint64_t svqdmullb_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index) : "SQDMULLB Zresult.D, Zop1.S, Zop2.S[imm_index]" + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector op1, Vector op2, ulong imm_index); + + + /// SaturatingDoublingMultiplyWideningUpper : Saturating doubling multiply long (top) + + /// svint16_t svqdmullt[_s16](svint8_t op1, svint8_t op2) : "SQDMULLT Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector left, Vector right); + + /// svint32_t svqdmullt[_s32](svint16_t op1, svint16_t op2) : "SQDMULLT Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector left, Vector right); + + /// svint64_t svqdmullt[_s64](svint32_t op1, svint32_t op2) : "SQDMULLT Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector left, Vector right); + + /// svint32_t svqdmullt_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index) : "SQDMULLT Zresult.S, Zop1.H, Zop2.H[imm_index]" + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index); + + /// svint64_t svqdmullt_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index) : "SQDMULLT Zresult.D, Zop1.S, Zop2.S[imm_index]" + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index); + + + /// SaturatingNegate : Saturating negate + + /// svint8_t svqneg[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) : "SQNEG Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; SQNEG Zresult.B, Pg/M, Zop.B" + /// svint8_t svqneg[_s8]_x(svbool_t pg, svint8_t op) : "SQNEG Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; SQNEG Zresult.B, Pg/M, Zop.B" + /// svint8_t svqneg[_s8]_z(svbool_t pg, svint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; SQNEG Zresult.B, Pg/M, Zop.B" + public static unsafe Vector SaturatingNegate(Vector value); + + /// svint16_t svqneg[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) : "SQNEG Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; 
SQNEG Zresult.H, Pg/M, Zop.H" + /// svint16_t svqneg[_s16]_x(svbool_t pg, svint16_t op) : "SQNEG Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; SQNEG Zresult.H, Pg/M, Zop.H" + /// svint16_t svqneg[_s16]_z(svbool_t pg, svint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; SQNEG Zresult.H, Pg/M, Zop.H" + public static unsafe Vector SaturatingNegate(Vector value); + + /// svint32_t svqneg[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) : "SQNEG Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; SQNEG Zresult.S, Pg/M, Zop.S" + /// svint32_t svqneg[_s32]_x(svbool_t pg, svint32_t op) : "SQNEG Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; SQNEG Zresult.S, Pg/M, Zop.S" + /// svint32_t svqneg[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; SQNEG Zresult.S, Pg/M, Zop.S" + public static unsafe Vector SaturatingNegate(Vector value); + + /// svint64_t svqneg[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) : "SQNEG Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; SQNEG Zresult.D, Pg/M, Zop.D" + /// svint64_t svqneg[_s64]_x(svbool_t pg, svint64_t op) : "SQNEG Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; SQNEG Zresult.D, Pg/M, Zop.D" + /// svint64_t svqneg[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; SQNEG Zresult.D, Pg/M, Zop.D" + public static unsafe Vector SaturatingNegate(Vector value); + + + /// SaturatingRoundingDoublingMultiplyAddHigh : Saturating rounding doubling multiply-add high + + /// svint8_t svqrdmlah[_s8](svint8_t op1, svint8_t op2, svint8_t op3) : "SQRDMLAH Ztied1.B, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SQRDMLAH Zresult.B, Zop2.B, Zop3.B" + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3); + + /// svint16_t svqrdmlah[_s16](svint16_t op1, svint16_t op2, svint16_t op3) : "SQRDMLAH Ztied1.H, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SQRDMLAH Zresult.H, Zop2.H, Zop3.H" + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3); + + /// svint32_t svqrdmlah[_s32](svint32_t op1, svint32_t op2, svint32_t op3) : "SQRDMLAH Ztied1.S, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SQRDMLAH Zresult.S, Zop2.S, Zop3.S" + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3); + + /// svint64_t svqrdmlah[_s64](svint64_t op1, svint64_t op2, svint64_t op3) : "SQRDMLAH Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; SQRDMLAH Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3); + + /// svint16_t svqrdmlah_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) : "SQRDMLAH Ztied1.H, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; SQRDMLAH Zresult.H, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svint32_t svqrdmlah_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) : "SQRDMLAH Ztied1.S, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; SQRDMLAH Zresult.S, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svint64_t svqrdmlah_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index) : "SQRDMLAH Ztied1.D, Zop2.D, Zop3.D[imm_index]" or "MOVPRFX Zresult, Zop1; SQRDMLAH Zresult.D, Zop2.D, 
Zop3.D[imm_index]" + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// SaturatingRoundingDoublingMultiplyHigh : Saturating rounding doubling multiply high + + /// svint8_t svqrdmulh[_s8](svint8_t op1, svint8_t op2) : "SQRDMULH Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector left, Vector right); + + /// svint16_t svqrdmulh[_s16](svint16_t op1, svint16_t op2) : "SQRDMULH Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector left, Vector right); + + /// svint32_t svqrdmulh[_s32](svint32_t op1, svint32_t op2) : "SQRDMULH Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector left, Vector right); + + /// svint64_t svqrdmulh[_s64](svint64_t op1, svint64_t op2) : "SQRDMULH Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector left, Vector right); + + /// svint16_t svqrdmulh_lane[_s16](svint16_t op1, svint16_t op2, uint64_t imm_index) : "SQRDMULH Zresult.H, Zop1.H, Zop2.H[imm_index]" + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector op1, Vector op2, ulong imm_index); + + /// svint32_t svqrdmulh_lane[_s32](svint32_t op1, svint32_t op2, uint64_t imm_index) : "SQRDMULH Zresult.S, Zop1.S, Zop2.S[imm_index]" + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector op1, Vector op2, ulong imm_index); + + /// svint64_t svqrdmulh_lane[_s64](svint64_t op1, svint64_t op2, uint64_t imm_index) : "SQRDMULH Zresult.D, Zop1.D, Zop2.D[imm_index]" + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector op1, Vector op2, ulong imm_index); + + + /// SaturatingRoundingDoublingMultiplySubtractHigh : Saturating rounding doubling multiply-subtract high + + /// svint8_t svqrdmlsh[_s8](svint8_t op1, svint8_t op2, svint8_t op3) : "SQRDMLSH Ztied1.B, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SQRDMLSH Zresult.B, Zop2.B, Zop3.B" + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3); + + /// svint16_t svqrdmlsh[_s16](svint16_t op1, svint16_t op2, svint16_t op3) : "SQRDMLSH Ztied1.H, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SQRDMLSH Zresult.H, Zop2.H, Zop3.H" + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3); + + /// svint32_t svqrdmlsh[_s32](svint32_t op1, svint32_t op2, svint32_t op3) : "SQRDMLSH Ztied1.S, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SQRDMLSH Zresult.S, Zop2.S, Zop3.S" + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3); + + /// svint64_t svqrdmlsh[_s64](svint64_t op1, svint64_t op2, svint64_t op3) : "SQRDMLSH Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; SQRDMLSH Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3); + + /// svint16_t svqrdmlsh_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) : "SQRDMLSH Ztied1.H, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; SQRDMLSH Zresult.H, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svint32_t svqrdmlsh_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) : "SQRDMLSH Ztied1.S, 
Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; SQRDMLSH Zresult.S, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3, ulong imm_index); + + /// svint64_t svqrdmlsh_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index) : "SQRDMLSH Ztied1.D, Zop2.D, Zop3.D[imm_index]" or "MOVPRFX Zresult, Zop1; SQRDMLSH Zresult.D, Zop2.D, Zop3.D[imm_index]" + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3, ulong imm_index); + + + /// SubtractHighNarowingLower : Subtract narrow high part (bottom) + + /// svint8_t svsubhnb[_s16](svint16_t op1, svint16_t op2) : "SUBHNB Zresult.B, Zop1.H, Zop2.H" + public static unsafe Vector SubtractHighNarowingLower(Vector left, Vector right); + + /// svint16_t svsubhnb[_s32](svint32_t op1, svint32_t op2) : "SUBHNB Zresult.H, Zop1.S, Zop2.S" + public static unsafe Vector SubtractHighNarowingLower(Vector left, Vector right); + + /// svint32_t svsubhnb[_s64](svint64_t op1, svint64_t op2) : "SUBHNB Zresult.S, Zop1.D, Zop2.D" + public static unsafe Vector SubtractHighNarowingLower(Vector left, Vector right); + + /// svuint8_t svsubhnb[_u16](svuint16_t op1, svuint16_t op2) : "SUBHNB Zresult.B, Zop1.H, Zop2.H" + public static unsafe Vector SubtractHighNarowingLower(Vector left, Vector right); + + /// svuint16_t svsubhnb[_u32](svuint32_t op1, svuint32_t op2) : "SUBHNB Zresult.H, Zop1.S, Zop2.S" + public static unsafe Vector SubtractHighNarowingLower(Vector left, Vector right); + + /// svuint32_t svsubhnb[_u64](svuint64_t op1, svuint64_t op2) : "SUBHNB Zresult.S, Zop1.D, Zop2.D" + public static unsafe Vector SubtractHighNarowingLower(Vector left, Vector right); + + + /// SubtractHighNarowingUpper : Subtract narrow high part (top) + + /// svint8_t svsubhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2) : "SUBHNT Ztied.B, Zop1.H, Zop2.H" + public static unsafe Vector SubtractHighNarowingUpper(Vector even, Vector left, Vector right); + + /// svint16_t svsubhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2) : "SUBHNT Ztied.H, Zop1.S, Zop2.S" + public static unsafe Vector SubtractHighNarowingUpper(Vector even, Vector left, Vector right); + + /// svint32_t svsubhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2) : "SUBHNT Ztied.S, Zop1.D, Zop2.D" + public static unsafe Vector SubtractHighNarowingUpper(Vector even, Vector left, Vector right); + + /// svuint8_t svsubhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2) : "SUBHNT Ztied.B, Zop1.H, Zop2.H" + public static unsafe Vector SubtractHighNarowingUpper(Vector even, Vector left, Vector right); + + /// svuint16_t svsubhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2) : "SUBHNT Ztied.H, Zop1.S, Zop2.S" + public static unsafe Vector SubtractHighNarowingUpper(Vector even, Vector left, Vector right); + + /// svuint32_t svsubhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2) : "SUBHNT Ztied.S, Zop1.D, Zop2.D" + public static unsafe Vector SubtractHighNarowingUpper(Vector even, Vector left, Vector right); + + + /// SubtractSaturate : Saturating subtract + + /// svint8_t svqsub[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SQSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SQSUB Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svqsub[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SQSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "SQSUBR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "SQSUB Zresult.B, Zop1.B, Zop2.B" + /// 
svint8_t svqsub[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SQSUB Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; SQSUBR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// svint16_t svqsub[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SQSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SQSUB Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svqsub[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SQSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "SQSUBR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "SQSUB Zresult.H, Zop1.H, Zop2.H" + /// svint16_t svqsub[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SQSUB Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; SQSUBR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// svint32_t svqsub[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SQSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SQSUB Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svqsub[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SQSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SQSUBR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "SQSUB Zresult.S, Zop1.S, Zop2.S" + /// svint32_t svqsub[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SQSUB Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SQSUBR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// svint64_t svqsub[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SQSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SQSUB Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svqsub[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SQSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SQSUBR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "SQSUB Zresult.D, Zop1.D, Zop2.D" + /// svint64_t svqsub[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SQSUB Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SQSUBR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// svuint8_t svqsub[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UQSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UQSUB Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svqsub[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UQSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "UQSUBR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "UQSUB Zresult.B, Zop1.B, Zop2.B" + /// svuint8_t svqsub[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; UQSUB Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; UQSUBR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// svuint16_t svqsub[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UQSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UQSUB Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svqsub[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UQSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "UQSUBR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "UQSUB Zresult.H, Zop1.H, Zop2.H" + /// svuint16_t svqsub[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX 
Zresult.H, Pg/Z, Zop1.H; UQSUB Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; UQSUBR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// svuint32_t svqsub[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UQSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UQSUB Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svqsub[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UQSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "UQSUBR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "UQSUB Zresult.S, Zop1.S, Zop2.S" + /// svuint32_t svqsub[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; UQSUB Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; UQSUBR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// svuint64_t svqsub[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UQSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UQSUB Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svqsub[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UQSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "UQSUBR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "UQSUB Zresult.D, Zop1.D, Zop2.D" + /// svuint64_t svqsub[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; UQSUB Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; UQSUBR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + + /// SubtractSaturateReversed : Saturating subtract reversed + + /// svint8_t svqsubr[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SQSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SQSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svqsubr[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SQSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "SQSUB Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "SQSUB Zresult.B, Zop2.B, Zop1.B" + /// svint8_t svqsubr[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SQSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; SQSUB Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right); + + /// svint16_t svqsubr[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SQSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SQSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svqsubr[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SQSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "SQSUB Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "SQSUB Zresult.H, Zop2.H, Zop1.H" + /// svint16_t svqsubr[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SQSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; SQSUB Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right); + + /// svint32_t svqsubr[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SQSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SQSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svqsubr[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SQSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SQSUB Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "SQSUB Zresult.S, Zop2.S, Zop1.S" + /// svint32_t svqsubr[_s32]_z(svbool_t pg, svint32_t op1, 
svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SQSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SQSUB Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right); + + /// svint64_t svqsubr[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SQSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SQSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svqsubr[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SQSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SQSUB Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "SQSUB Zresult.D, Zop2.D, Zop1.D" + /// svint64_t svqsubr[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SQSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SQSUB Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right); + + /// svuint8_t svqsubr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UQSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UQSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svqsubr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UQSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "UQSUB Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "UQSUB Zresult.B, Zop2.B, Zop1.B" + /// svuint8_t svqsubr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; UQSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; UQSUB Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right); + + /// svuint16_t svqsubr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UQSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UQSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svqsubr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UQSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "UQSUB Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "UQSUB Zresult.H, Zop2.H, Zop1.H" + /// svuint16_t svqsubr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; UQSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; UQSUB Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right); + + /// svuint32_t svqsubr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UQSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UQSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svqsubr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UQSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "UQSUB Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "UQSUB Zresult.S, Zop2.S, Zop1.S" + /// svuint32_t svqsubr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; UQSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; UQSUB Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right); + + /// svuint64_t svqsubr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UQSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UQSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svqsubr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UQSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "UQSUB Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "UQSUB Zresult.D, Zop2.D, Zop1.D" + /// svuint64_t svqsubr[_u64]_z(svbool_t pg, svuint64_t 
op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; UQSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; UQSUB Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right); + + + /// SubtractWideLower : Subtract wide (bottom) + + /// svint16_t svsubwb[_s16](svint16_t op1, svint8_t op2) : "SSUBWB Zresult.H, Zop1.H, Zop2.B" + public static unsafe Vector SubtractWideLower(Vector left, Vector right); + + /// svint32_t svsubwb[_s32](svint32_t op1, svint16_t op2) : "SSUBWB Zresult.S, Zop1.S, Zop2.H" + public static unsafe Vector SubtractWideLower(Vector left, Vector right); + + /// svint64_t svsubwb[_s64](svint64_t op1, svint32_t op2) : "SSUBWB Zresult.D, Zop1.D, Zop2.S" + public static unsafe Vector SubtractWideLower(Vector left, Vector right); + + /// svuint16_t svsubwb[_u16](svuint16_t op1, svuint8_t op2) : "USUBWB Zresult.H, Zop1.H, Zop2.B" + public static unsafe Vector SubtractWideLower(Vector left, Vector right); + + /// svuint32_t svsubwb[_u32](svuint32_t op1, svuint16_t op2) : "USUBWB Zresult.S, Zop1.S, Zop2.H" + public static unsafe Vector SubtractWideLower(Vector left, Vector right); + + /// svuint64_t svsubwb[_u64](svuint64_t op1, svuint32_t op2) : "USUBWB Zresult.D, Zop1.D, Zop2.S" + public static unsafe Vector SubtractWideLower(Vector left, Vector right); + + + /// SubtractWideUpper : Subtract wide (top) + + /// svint16_t svsubwt[_s16](svint16_t op1, svint8_t op2) : "SSUBWT Zresult.H, Zop1.H, Zop2.B" + public static unsafe Vector SubtractWideUpper(Vector left, Vector right); + + /// svint32_t svsubwt[_s32](svint32_t op1, svint16_t op2) : "SSUBWT Zresult.S, Zop1.S, Zop2.H" + public static unsafe Vector SubtractWideUpper(Vector left, Vector right); + + /// svint64_t svsubwt[_s64](svint64_t op1, svint32_t op2) : "SSUBWT Zresult.D, Zop1.D, Zop2.S" + public static unsafe Vector SubtractWideUpper(Vector left, Vector right); + + /// svuint16_t svsubwt[_u16](svuint16_t op1, svuint8_t op2) : "USUBWT Zresult.H, Zop1.H, Zop2.B" + public static unsafe Vector SubtractWideUpper(Vector left, Vector right); + + /// svuint32_t svsubwt[_u32](svuint32_t op1, svuint16_t op2) : "USUBWT Zresult.S, Zop1.S, Zop2.H" + public static unsafe Vector SubtractWideUpper(Vector left, Vector right); + + /// svuint64_t svsubwt[_u64](svuint64_t op1, svuint32_t op2) : "USUBWT Zresult.D, Zop1.D, Zop2.S" + public static unsafe Vector SubtractWideUpper(Vector left, Vector right); + + + /// SubtractWideningLower : Subtract long (bottom) + + /// svint16_t svsublb[_s16](svint8_t op1, svint8_t op2) : "SSUBLB Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector SubtractWideningLower(Vector left, Vector right); + + /// svint32_t svsublb[_s32](svint16_t op1, svint16_t op2) : "SSUBLB Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector SubtractWideningLower(Vector left, Vector right); + + /// svint64_t svsublb[_s64](svint32_t op1, svint32_t op2) : "SSUBLB Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector SubtractWideningLower(Vector left, Vector right); + + /// svuint16_t svsublb[_u16](svuint8_t op1, svuint8_t op2) : "USUBLB Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector SubtractWideningLower(Vector left, Vector right); + + /// svuint32_t svsublb[_u32](svuint16_t op1, svuint16_t op2) : "USUBLB Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector SubtractWideningLower(Vector left, Vector right); + + /// svuint64_t svsublb[_u64](svuint32_t op1, svuint32_t op2) : "USUBLB Zresult.D, Zop1.S, Zop2.S" + public static 
unsafe Vector SubtractWideningLower(Vector left, Vector right); + + + /// SubtractWideningLowerUpper : Subtract long (bottom - top) + + /// svint16_t svsublbt[_s16](svint8_t op1, svint8_t op2) : "SSUBLBT Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector SubtractWideningLowerUpper(Vector left, Vector right); + + /// svint32_t svsublbt[_s32](svint16_t op1, svint16_t op2) : "SSUBLBT Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector SubtractWideningLowerUpper(Vector left, Vector right); + + /// svint64_t svsublbt[_s64](svint32_t op1, svint32_t op2) : "SSUBLBT Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector SubtractWideningLowerUpper(Vector left, Vector right); + + + /// SubtractWideningUpper : Subtract long (top) + + /// svint16_t svsublt[_s16](svint8_t op1, svint8_t op2) : "SSUBLT Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right); + + /// svint32_t svsublt[_s32](svint16_t op1, svint16_t op2) : "SSUBLT Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right); + + /// svint64_t svsublt[_s64](svint32_t op1, svint32_t op2) : "SSUBLT Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right); + + /// svuint16_t svsublt[_u16](svuint8_t op1, svuint8_t op2) : "USUBLT Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right); + + /// svuint32_t svsublt[_u32](svuint16_t op1, svuint16_t op2) : "USUBLT Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right); + + /// svuint64_t svsublt[_u64](svuint32_t op1, svuint32_t op2) : "USUBLT Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right); + + + /// SubtractWideningUpperLower : Subtract long (top - bottom) + + /// svint16_t svsubltb[_s16](svint8_t op1, svint8_t op2) : "SSUBLTB Zresult.H, Zop1.B, Zop2.B" + public static unsafe Vector SubtractWideningUpperLower(Vector left, Vector right); + + /// svint32_t svsubltb[_s32](svint16_t op1, svint16_t op2) : "SSUBLTB Zresult.S, Zop1.H, Zop2.H" + public static unsafe Vector SubtractWideningUpperLower(Vector left, Vector right); + + /// svint64_t svsubltb[_s64](svint32_t op1, svint32_t op2) : "SSUBLTB Zresult.D, Zop1.S, Zop2.S" + public static unsafe Vector SubtractWideningUpperLower(Vector left, Vector right); + + + /// SubtractWithBorrowWideningLower : Subtract with borrow long (bottom) + + /// svuint32_t svsbclb[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) : "SBCLB Ztied1.S, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SBCLB Zresult.S, Zop2.S, Zop3.S" + public static unsafe Vector SubtractWithBorrowWideningLower(Vector op1, Vector op2, Vector op3); + + /// svuint64_t svsbclb[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) : "SBCLB Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; SBCLB Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector SubtractWithBorrowWideningLower(Vector op1, Vector op2, Vector op3); + + + /// SubtractWithBorrowWideningUpper : Subtract with borrow long (top) + + /// svuint32_t svsbclt[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) : "SBCLT Ztied1.S, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; SBCLT Zresult.S, Zop2.S, Zop3.S" + public static unsafe Vector SubtractWithBorrowWideningUpper(Vector op1, Vector op2, Vector op3); + + /// svuint64_t svsbclt[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) : "SBCLT Ztied1.D, Zop2.D, Zop3.D" or "MOVPRFX 
Zresult, Zop1; SBCLT Zresult.D, Zop2.D, Zop3.D" + public static unsafe Vector SubtractWithBorrowWideningUpper(Vector op1, Vector op2, Vector op3); + + + /// total method signatures: 412 + /// total method names: 70 +} + + + /// Rejected: + /// public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, sbyte right); // svaba[_n_s8] + /// public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, short right); // svaba[_n_s16] + /// public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, int right); // svaba[_n_s32] + /// public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, long right); // svaba[_n_s64] + /// public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, byte right); // svaba[_n_u8] + /// public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, ushort right); // svaba[_n_u16] + /// public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, uint right); // svaba[_n_u32] + /// public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, ulong right); // svaba[_n_u64] + /// public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, sbyte right); // svabalb[_n_s16] + /// public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, short right); // svabalb[_n_s32] + /// public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, int right); // svabalb[_n_s64] + /// public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, byte right); // svabalb[_n_u16] + /// public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, ushort right); // svabalb[_n_u32] + /// public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, uint right); // svabalb[_n_u64] + /// public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, sbyte right); // svabalt[_n_s16] + /// public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, short right); // svabalt[_n_s32] + /// public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, int right); // svabalt[_n_s64] + /// public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, byte right); // svabalt[_n_u16] + /// public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, ushort right); // svabalt[_n_u32] + /// public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, uint right); // svabalt[_n_u64] + /// public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, sbyte right); // svabdlb[_n_s16] + /// public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, short right); // svabdlb[_n_s32] + /// public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, int right); // svabdlb[_n_s64] + /// public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, byte right); // svabdlb[_n_u16] + /// public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, ushort right); // svabdlb[_n_u32] + /// public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, uint right); // svabdlb[_n_u64] + /// public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, sbyte right); // svabdlt[_n_s16] + /// public static unsafe Vector 
AbsoluteDifferenceWideningUpper(Vector left, short right); // svabdlt[_n_s32] + /// public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, int right); // svabdlt[_n_s64] + /// public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, byte right); // svabdlt[_n_u16] + /// public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, ushort right); // svabdlt[_n_u32] + /// public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, uint right); // svabdlt[_n_u64] + /// public static unsafe Vector AddCarryWideningLower(Vector op1, Vector op2, uint op3); // svadclb[_n_u32] + /// public static unsafe Vector AddCarryWideningLower(Vector op1, Vector op2, ulong op3); // svadclb[_n_u64] + /// public static unsafe Vector AddCarryWideningUpper(Vector op1, Vector op2, uint op3); // svadclt[_n_u32] + /// public static unsafe Vector AddCarryWideningUpper(Vector op1, Vector op2, ulong op3); // svadclt[_n_u64] + /// public static unsafe Vector AddHighNarrowingLower(Vector left, short right); // svaddhnb[_n_s16] + /// public static unsafe Vector AddHighNarrowingLower(Vector left, int right); // svaddhnb[_n_s32] + /// public static unsafe Vector AddHighNarrowingLower(Vector left, long right); // svaddhnb[_n_s64] + /// public static unsafe Vector AddHighNarrowingLower(Vector left, ushort right); // svaddhnb[_n_u16] + /// public static unsafe Vector AddHighNarrowingLower(Vector left, uint right); // svaddhnb[_n_u32] + /// public static unsafe Vector AddHighNarrowingLower(Vector left, ulong right); // svaddhnb[_n_u64] + /// public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, short right); // svaddhnt[_n_s16] + /// public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, int right); // svaddhnt[_n_s32] + /// public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, long right); // svaddhnt[_n_s64] + /// public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, ushort right); // svaddhnt[_n_u16] + /// public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, uint right); // svaddhnt[_n_u32] + /// public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, ulong right); // svaddhnt[_n_u64] + /// public static unsafe Vector AddSaturate(Vector left, sbyte right); // svqadd[_n_s8]_m or svqadd[_n_s8]_x or svqadd[_n_s8]_z + /// public static unsafe Vector AddSaturate(Vector left, short right); // svqadd[_n_s16]_m or svqadd[_n_s16]_x or svqadd[_n_s16]_z + /// public static unsafe Vector AddSaturate(Vector left, int right); // svqadd[_n_s32]_m or svqadd[_n_s32]_x or svqadd[_n_s32]_z + /// public static unsafe Vector AddSaturate(Vector left, long right); // svqadd[_n_s64]_m or svqadd[_n_s64]_x or svqadd[_n_s64]_z + /// public static unsafe Vector AddSaturate(Vector left, byte right); // svqadd[_n_u8]_m or svqadd[_n_u8]_x or svqadd[_n_u8]_z + /// public static unsafe Vector AddSaturate(Vector left, ushort right); // svqadd[_n_u16]_m or svqadd[_n_u16]_x or svqadd[_n_u16]_z + /// public static unsafe Vector AddSaturate(Vector left, uint right); // svqadd[_n_u32]_m or svqadd[_n_u32]_x or svqadd[_n_u32]_z + /// public static unsafe Vector AddSaturate(Vector left, ulong right); // svqadd[_n_u64]_m or svqadd[_n_u64]_x or svqadd[_n_u64]_z + /// public static unsafe Vector AddSaturateWithSignedAddend(Vector left, sbyte right); // svsqadd[_n_u8]_m or svsqadd[_n_u8]_x or svsqadd[_n_u8]_z + /// public static unsafe Vector AddSaturateWithSignedAddend(Vector left, short right);
// svsqadd[_n_u16]_m or svsqadd[_n_u16]_x or svsqadd[_n_u16]_z + /// public static unsafe Vector AddSaturateWithSignedAddend(Vector left, int right); // svsqadd[_n_u32]_m or svsqadd[_n_u32]_x or svsqadd[_n_u32]_z + /// public static unsafe Vector AddSaturateWithSignedAddend(Vector left, long right); // svsqadd[_n_u64]_m or svsqadd[_n_u64]_x or svsqadd[_n_u64]_z + /// public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, byte right); // svuqadd[_n_s8]_m or svuqadd[_n_s8]_x or svuqadd[_n_s8]_z + /// public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, ushort right); // svuqadd[_n_s16]_m or svuqadd[_n_s16]_x or svuqadd[_n_s16]_z + /// public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, uint right); // svuqadd[_n_s32]_m or svuqadd[_n_s32]_x or svuqadd[_n_s32]_z + /// public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, ulong right); // svuqadd[_n_s64]_m or svuqadd[_n_s64]_x or svuqadd[_n_s64]_z + /// public static unsafe Vector AddWideLower(Vector left, sbyte right); // svaddwb[_n_s16] + /// public static unsafe Vector AddWideLower(Vector left, short right); // svaddwb[_n_s32] + /// public static unsafe Vector AddWideLower(Vector left, int right); // svaddwb[_n_s64] + /// public static unsafe Vector AddWideLower(Vector left, byte right); // svaddwb[_n_u16] + /// public static unsafe Vector AddWideLower(Vector left, ushort right); // svaddwb[_n_u32] + /// public static unsafe Vector AddWideLower(Vector left, uint right); // svaddwb[_n_u64] + /// public static unsafe Vector AddWideUpper(Vector left, sbyte right); // svaddwt[_n_s16] + /// public static unsafe Vector AddWideUpper(Vector left, short right); // svaddwt[_n_s32] + /// public static unsafe Vector AddWideUpper(Vector left, int right); // svaddwt[_n_s64] + /// public static unsafe Vector AddWideUpper(Vector left, byte right); // svaddwt[_n_u16] + /// public static unsafe Vector AddWideUpper(Vector left, ushort right); // svaddwt[_n_u32] + /// public static unsafe Vector AddWideUpper(Vector left, uint right); // svaddwt[_n_u64] + /// public static unsafe Vector AddWideningLower(Vector left, sbyte right); // svaddlb[_n_s16] + /// public static unsafe Vector AddWideningLower(Vector left, short right); // svaddlb[_n_s32] + /// public static unsafe Vector AddWideningLower(Vector left, int right); // svaddlb[_n_s64] + /// public static unsafe Vector AddWideningLower(Vector left, byte right); // svaddlb[_n_u16] + /// public static unsafe Vector AddWideningLower(Vector left, ushort right); // svaddlb[_n_u32] + /// public static unsafe Vector AddWideningLower(Vector left, uint right); // svaddlb[_n_u64] + /// public static unsafe Vector AddWideningLowerUpper(Vector left, sbyte right); // svaddlbt[_n_s16] + /// public static unsafe Vector AddWideningLowerUpper(Vector left, short right); // svaddlbt[_n_s32] + /// public static unsafe Vector AddWideningLowerUpper(Vector left, int right); // svaddlbt[_n_s64] + /// public static unsafe Vector AddWideningUpper(Vector left, sbyte right); // svaddlt[_n_s16] + /// public static unsafe Vector AddWideningUpper(Vector left, short right); // svaddlt[_n_s32] + /// public static unsafe Vector AddWideningUpper(Vector left, int right); // svaddlt[_n_s64] + /// public static unsafe Vector AddWideningUpper(Vector left, byte right); // svaddlt[_n_u16] + /// public static unsafe Vector AddWideningUpper(Vector left, ushort right); // svaddlt[_n_u32] + /// public static unsafe Vector AddWideningUpper(Vector left, uint right); // svaddlt[_n_u64] + 
/// public static unsafe Vector HalvingAdd(Vector left, sbyte right); // svhadd[_n_s8]_m or svhadd[_n_s8]_x or svhadd[_n_s8]_z + /// public static unsafe Vector HalvingAdd(Vector left, short right); // svhadd[_n_s16]_m or svhadd[_n_s16]_x or svhadd[_n_s16]_z + /// public static unsafe Vector HalvingAdd(Vector left, int right); // svhadd[_n_s32]_m or svhadd[_n_s32]_x or svhadd[_n_s32]_z + /// public static unsafe Vector HalvingAdd(Vector left, long right); // svhadd[_n_s64]_m or svhadd[_n_s64]_x or svhadd[_n_s64]_z + /// public static unsafe Vector HalvingAdd(Vector left, byte right); // svhadd[_n_u8]_m or svhadd[_n_u8]_x or svhadd[_n_u8]_z + /// public static unsafe Vector HalvingAdd(Vector left, ushort right); // svhadd[_n_u16]_m or svhadd[_n_u16]_x or svhadd[_n_u16]_z + /// public static unsafe Vector HalvingAdd(Vector left, uint right); // svhadd[_n_u32]_m or svhadd[_n_u32]_x or svhadd[_n_u32]_z + /// public static unsafe Vector HalvingAdd(Vector left, ulong right); // svhadd[_n_u64]_m or svhadd[_n_u64]_x or svhadd[_n_u64]_z + /// public static unsafe Vector HalvingSubtract(Vector left, sbyte right); // svhsub[_n_s8]_m or svhsub[_n_s8]_x or svhsub[_n_s8]_z + /// public static unsafe Vector HalvingSubtract(Vector left, short right); // svhsub[_n_s16]_m or svhsub[_n_s16]_x or svhsub[_n_s16]_z + /// public static unsafe Vector HalvingSubtract(Vector left, int right); // svhsub[_n_s32]_m or svhsub[_n_s32]_x or svhsub[_n_s32]_z + /// public static unsafe Vector HalvingSubtract(Vector left, long right); // svhsub[_n_s64]_m or svhsub[_n_s64]_x or svhsub[_n_s64]_z + /// public static unsafe Vector HalvingSubtract(Vector left, byte right); // svhsub[_n_u8]_m or svhsub[_n_u8]_x or svhsub[_n_u8]_z + /// public static unsafe Vector HalvingSubtract(Vector left, ushort right); // svhsub[_n_u16]_m or svhsub[_n_u16]_x or svhsub[_n_u16]_z + /// public static unsafe Vector HalvingSubtract(Vector left, uint right); // svhsub[_n_u32]_m or svhsub[_n_u32]_x or svhsub[_n_u32]_z + /// public static unsafe Vector HalvingSubtract(Vector left, ulong right); // svhsub[_n_u64]_m or svhsub[_n_u64]_x or svhsub[_n_u64]_z + /// public static unsafe Vector HalvingSubtractReversed(Vector left, sbyte right); // svhsubr[_n_s8]_m or svhsubr[_n_s8]_x or svhsubr[_n_s8]_z + /// public static unsafe Vector HalvingSubtractReversed(Vector left, short right); // svhsubr[_n_s16]_m or svhsubr[_n_s16]_x or svhsubr[_n_s16]_z + /// public static unsafe Vector HalvingSubtractReversed(Vector left, int right); // svhsubr[_n_s32]_m or svhsubr[_n_s32]_x or svhsubr[_n_s32]_z + /// public static unsafe Vector HalvingSubtractReversed(Vector left, long right); // svhsubr[_n_s64]_m or svhsubr[_n_s64]_x or svhsubr[_n_s64]_z + /// public static unsafe Vector HalvingSubtractReversed(Vector left, byte right); // svhsubr[_n_u8]_m or svhsubr[_n_u8]_x or svhsubr[_n_u8]_z + /// public static unsafe Vector HalvingSubtractReversed(Vector left, ushort right); // svhsubr[_n_u16]_m or svhsubr[_n_u16]_x or svhsubr[_n_u16]_z + /// public static unsafe Vector HalvingSubtractReversed(Vector left, uint right); // svhsubr[_n_u32]_m or svhsubr[_n_u32]_x or svhsubr[_n_u32]_z + /// public static unsafe Vector HalvingSubtractReversed(Vector left, ulong right); // svhsubr[_n_u64]_m or svhsubr[_n_u64]_x or svhsubr[_n_u64]_z + /// public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, sbyte op3); // svmlalb[_n_s16] + /// public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, short op3); // svmlalb[_n_s32] + /// public static 
unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, int op3); // svmlalb[_n_s64] + /// public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, byte op3); // svmlalb[_n_u16] + /// public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, ushort op3); // svmlalb[_n_u32] + /// public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, uint op3); // svmlalb[_n_u64] + /// public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, sbyte op3); // svmlalt[_n_s16] + /// public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, short op3); // svmlalt[_n_s32] + /// public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, int op3); // svmlalt[_n_s64] + /// public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, byte op3); // svmlalt[_n_u16] + /// public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, ushort op3); // svmlalt[_n_u32] + /// public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, uint op3); // svmlalt[_n_u64] + /// public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, sbyte op3); // svmlslb[_n_s16] + /// public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, short op3); // svmlslb[_n_s32] + /// public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, int op3); // svmlslb[_n_s64] + /// public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, byte op3); // svmlslb[_n_u16] + /// public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, ushort op3); // svmlslb[_n_u32] + /// public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, uint op3); // svmlslb[_n_u64] + /// public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, sbyte op3); // svmlslt[_n_s16] + /// public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, short op3); // svmlslt[_n_s32] + /// public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, int op3); // svmlslt[_n_s64] + /// public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, byte op3); // svmlslt[_n_u16] + /// public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, ushort op3); // svmlslt[_n_u32] + /// public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, uint op3); // svmlslt[_n_u64] + /// public static unsafe Vector MultiplyWideningLower(Vector left, sbyte right); // svmullb[_n_s16] + /// public static unsafe Vector MultiplyWideningLower(Vector left, short right); // svmullb[_n_s32] + /// public static unsafe Vector MultiplyWideningLower(Vector left, int right); // svmullb[_n_s64] + /// public static unsafe Vector MultiplyWideningLower(Vector left, byte right); // svmullb[_n_u16] + /// public static unsafe Vector MultiplyWideningLower(Vector left, ushort right); // svmullb[_n_u32] + /// public static unsafe Vector MultiplyWideningLower(Vector left, uint right); // svmullb[_n_u64] + /// public static unsafe Vector MultiplyWideningUpper(Vector left, sbyte right); // svmullt[_n_s16] + /// public static unsafe Vector MultiplyWideningUpper(Vector left, short right); // svmullt[_n_s32] + /// public static unsafe Vector MultiplyWideningUpper(Vector left, int right); // svmullt[_n_s64] + /// public static unsafe Vector MultiplyWideningUpper(Vector left, byte right); // svmullt[_n_u16] + 
/// public static unsafe Vector MultiplyWideningUpper(Vector left, ushort right); // svmullt[_n_u32] + /// public static unsafe Vector MultiplyWideningUpper(Vector left, uint right); // svmullt[_n_u64] + /// public static unsafe Vector PolynomialMultiply(Vector left, byte right); // svpmul[_n_u8] + /// public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, byte right); // svpmullb[_n_u16] + /// public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, uint right); // svpmullb[_n_u64] + /// public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, byte right); // svpmullb_pair[_n_u8] + /// public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, uint right); // svpmullb_pair[_n_u32] + /// public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, byte right); // svpmullt[_n_u16] + /// public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, uint right); // svpmullt[_n_u64] + /// public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, byte right); // svpmullt_pair[_n_u8] + /// public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, uint right); // svpmullt_pair[_n_u32] + /// public static unsafe Vector RoundingAddHighNarrowingLower(Vector left, short right); // svraddhnb[_n_s16] + /// public static unsafe Vector RoundingAddHighNarrowingLower(Vector left, int right); // svraddhnb[_n_s32] + /// public static unsafe Vector RoundingAddHighNarrowingLower(Vector left, long right); // svraddhnb[_n_s64] + /// public static unsafe Vector RoundingAddHighNarrowingLower(Vector left, ushort right); // svraddhnb[_n_u16] + /// public static unsafe Vector RoundingAddHighNarrowingLower(Vector left, uint right); // svraddhnb[_n_u32] + /// public static unsafe Vector RoundingAddHighNarrowingLower(Vector left, ulong right); // svraddhnb[_n_u64] + /// public static unsafe Vector RoundingAddHighNarrowingUpper(Vector even, Vector left, short right); // svraddhnt[_n_s16] + /// public static unsafe Vector RoundingAddHighNarrowingUpper(Vector even, Vector left, int right); // svraddhnt[_n_s32] + /// public static unsafe Vector RoundingAddHighNarrowingUpper(Vector even, Vector left, long right); // svraddhnt[_n_s64] + /// public static unsafe Vector RoundingAddHighNarrowingUpper(Vector even, Vector left, ushort right); // svraddhnt[_n_u16] + /// public static unsafe Vector RoundingAddHighNarrowingUpper(Vector even, Vector left, uint right); // svraddhnt[_n_u32] + /// public static unsafe Vector RoundingAddHighNarrowingUpper(Vector even, Vector left, ulong right); // svraddhnt[_n_u64] + /// public static unsafe Vector RoundingHalvingAdd(Vector left, sbyte right); // svrhadd[_n_s8]_m or svrhadd[_n_s8]_x or svrhadd[_n_s8]_z + /// public static unsafe Vector RoundingHalvingAdd(Vector left, short right); // svrhadd[_n_s16]_m or svrhadd[_n_s16]_x or svrhadd[_n_s16]_z + /// public static unsafe Vector RoundingHalvingAdd(Vector left, int right); // svrhadd[_n_s32]_m or svrhadd[_n_s32]_x or svrhadd[_n_s32]_z + /// public static unsafe Vector RoundingHalvingAdd(Vector left, long right); // svrhadd[_n_s64]_m or svrhadd[_n_s64]_x or svrhadd[_n_s64]_z + /// public static unsafe Vector RoundingHalvingAdd(Vector left, byte right); // svrhadd[_n_u8]_m or svrhadd[_n_u8]_x or svrhadd[_n_u8]_z + /// public static unsafe Vector RoundingHalvingAdd(Vector left, ushort right); // svrhadd[_n_u16]_m or svrhadd[_n_u16]_x or svrhadd[_n_u16]_z + /// public static unsafe Vector RoundingHalvingAdd(Vector left, uint right); //
svrhadd[_n_u32]_m or svrhadd[_n_u32]_x or svrhadd[_n_u32]_z + /// public static unsafe Vector RoundingHalvingAdd(Vector left, ulong right); // svrhadd[_n_u64]_m or svrhadd[_n_u64]_x or svrhadd[_n_u64]_z + /// public static unsafe Vector RoundingSubtractHighNarrowingLower(Vector left, short right); // svrsubhnb[_n_s16] + /// public static unsafe Vector RoundingSubtractHighNarrowingLower(Vector left, int right); // svrsubhnb[_n_s32] + /// public static unsafe Vector RoundingSubtractHighNarrowingLower(Vector left, long right); // svrsubhnb[_n_s64] + /// public static unsafe Vector RoundingSubtractHighNarrowingLower(Vector left, ushort right); // svrsubhnb[_n_u16] + /// public static unsafe Vector RoundingSubtractHighNarrowingLower(Vector left, uint right); // svrsubhnb[_n_u32] + /// public static unsafe Vector RoundingSubtractHighNarrowingLower(Vector left, ulong right); // svrsubhnb[_n_u64] + /// public static unsafe Vector RoundingSubtractHighNarrowingUpper(Vector even, Vector left, short right); // svrsubhnt[_n_s16] + /// public static unsafe Vector RoundingSubtractHighNarrowingUpper(Vector even, Vector left, int right); // svrsubhnt[_n_s32] + /// public static unsafe Vector RoundingSubtractHighNarrowingUpper(Vector even, Vector left, long right); // svrsubhnt[_n_s64] + /// public static unsafe Vector RoundingSubtractHighNarrowingUpper(Vector even, Vector left, ushort right); // svrsubhnt[_n_u16] + /// public static unsafe Vector RoundingSubtractHighNarrowingUpper(Vector even, Vector left, uint right); // svrsubhnt[_n_u32] + /// public static unsafe Vector RoundingSubtractHighNarrowingUpper(Vector even, Vector left, ulong right); // svrsubhnt[_n_u64] + /// public static unsafe Vector SaturatingDoublingMultiplyAddWideningLower(Vector op1, Vector op2, sbyte op3); // svqdmlalb[_n_s16] + /// public static unsafe Vector SaturatingDoublingMultiplyAddWideningLower(Vector op1, Vector op2, short op3); // svqdmlalb[_n_s32] + /// public static unsafe Vector SaturatingDoublingMultiplyAddWideningLower(Vector op1, Vector op2, int op3); // svqdmlalb[_n_s64] + /// public static unsafe Vector SaturatingDoublingMultiplyAddWideningLowerUpper(Vector op1, Vector op2, sbyte op3); // svqdmlalbt[_n_s16] + /// public static unsafe Vector SaturatingDoublingMultiplyAddWideningLowerUpper(Vector op1, Vector op2, short op3); // svqdmlalbt[_n_s32] + /// public static unsafe Vector SaturatingDoublingMultiplyAddWideningLowerUpper(Vector op1, Vector op2, int op3); // svqdmlalbt[_n_s64] + /// public static unsafe Vector SaturatingDoublingMultiplyAddWideningUpper(Vector op1, Vector op2, sbyte op3); // svqdmlalt[_n_s16] + /// public static unsafe Vector SaturatingDoublingMultiplyAddWideningUpper(Vector op1, Vector op2, short op3); // svqdmlalt[_n_s32] + /// public static unsafe Vector SaturatingDoublingMultiplyAddWideningUpper(Vector op1, Vector op2, int op3); // svqdmlalt[_n_s64] + /// public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector left, sbyte right); // svqdmulh[_n_s8] + /// public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector left, short right); // svqdmulh[_n_s16] + /// public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector left, int right); // svqdmulh[_n_s32] + /// public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector left, long right); // svqdmulh[_n_s64] + /// public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, sbyte op3); // svqdmlslb[_n_s16] + /// public static unsafe Vector
SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, short op3); // svqdmlslb[_n_s32] + /// public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, int op3); // svqdmlslb[_n_s64] + /// public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector op1, Vector op2, sbyte op3); // svqdmlslbt[_n_s16] + /// public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector op1, Vector op2, short op3); // svqdmlslbt[_n_s32] + /// public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector op1, Vector op2, int op3); // svqdmlslbt[_n_s64] + /// public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, sbyte op3); // svqdmlslt[_n_s16] + /// public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, short op3); // svqdmlslt[_n_s32] + /// public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, int op3); // svqdmlslt[_n_s64] + /// public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector left, sbyte right); // svqdmullb[_n_s16] + /// public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector left, short right); // svqdmullb[_n_s32] + /// public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector left, int right); // svqdmullb[_n_s64] + /// public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector left, sbyte right); // svqdmullt[_n_s16] + /// public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector left, short right); // svqdmullt[_n_s32] + /// public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector left, int right); // svqdmullt[_n_s64] + /// public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, sbyte op3); // svqrdmlah[_n_s8] + /// public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, short op3); // svqrdmlah[_n_s16] + /// public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, int op3); // svqrdmlah[_n_s32] + /// public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, long op3); // svqrdmlah[_n_s64] + /// public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector left, sbyte right); // svqrdmulh[_n_s8] + /// public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector left, short right); // svqrdmulh[_n_s16] + /// public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector left, int right); // svqrdmulh[_n_s32] + /// public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector left, long right); // svqrdmulh[_n_s64] + /// public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, sbyte op3); // svqrdmlsh[_n_s8] + /// public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, short op3); // svqrdmlsh[_n_s16] + /// public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, int op3); // svqrdmlsh[_n_s32] + /// public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, long op3); // svqrdmlsh[_n_s64] + /// public static unsafe Vector SubtractHighNarrowingLower(Vector left, short right); // svsubhnb[_n_s16] + /// public static unsafe Vector SubtractHighNarrowingLower(Vector left,
int right); // svsubhnb[_n_s32] + /// public static unsafe Vector SubtractHighNarrowingLower(Vector left, long right); // svsubhnb[_n_s64] + /// public static unsafe Vector SubtractHighNarrowingLower(Vector left, ushort right); // svsubhnb[_n_u16] + /// public static unsafe Vector SubtractHighNarrowingLower(Vector left, uint right); // svsubhnb[_n_u32] + /// public static unsafe Vector SubtractHighNarrowingLower(Vector left, ulong right); // svsubhnb[_n_u64] + /// public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, short right); // svsubhnt[_n_s16] + /// public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, int right); // svsubhnt[_n_s32] + /// public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, long right); // svsubhnt[_n_s64] + /// public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, ushort right); // svsubhnt[_n_u16] + /// public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, uint right); // svsubhnt[_n_u32] + /// public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, ulong right); // svsubhnt[_n_u64] + /// public static unsafe Vector SubtractSaturate(Vector left, sbyte right); // svqsub[_n_s8]_m or svqsub[_n_s8]_x or svqsub[_n_s8]_z + /// public static unsafe Vector SubtractSaturate(Vector left, short right); // svqsub[_n_s16]_m or svqsub[_n_s16]_x or svqsub[_n_s16]_z + /// public static unsafe Vector SubtractSaturate(Vector left, int right); // svqsub[_n_s32]_m or svqsub[_n_s32]_x or svqsub[_n_s32]_z + /// public static unsafe Vector SubtractSaturate(Vector left, long right); // svqsub[_n_s64]_m or svqsub[_n_s64]_x or svqsub[_n_s64]_z + /// public static unsafe Vector SubtractSaturate(Vector left, byte right); // svqsub[_n_u8]_m or svqsub[_n_u8]_x or svqsub[_n_u8]_z + /// public static unsafe Vector SubtractSaturate(Vector left, ushort right); // svqsub[_n_u16]_m or svqsub[_n_u16]_x or svqsub[_n_u16]_z + /// public static unsafe Vector SubtractSaturate(Vector left, uint right); // svqsub[_n_u32]_m or svqsub[_n_u32]_x or svqsub[_n_u32]_z + /// public static unsafe Vector SubtractSaturate(Vector left, ulong right); // svqsub[_n_u64]_m or svqsub[_n_u64]_x or svqsub[_n_u64]_z + /// public static unsafe Vector SubtractSaturateReversed(Vector left, sbyte right); // svqsubr[_n_s8]_m or svqsubr[_n_s8]_x or svqsubr[_n_s8]_z + /// public static unsafe Vector SubtractSaturateReversed(Vector left, short right); // svqsubr[_n_s16]_m or svqsubr[_n_s16]_x or svqsubr[_n_s16]_z + /// public static unsafe Vector SubtractSaturateReversed(Vector left, int right); // svqsubr[_n_s32]_m or svqsubr[_n_s32]_x or svqsubr[_n_s32]_z + /// public static unsafe Vector SubtractSaturateReversed(Vector left, long right); // svqsubr[_n_s64]_m or svqsubr[_n_s64]_x or svqsubr[_n_s64]_z + /// public static unsafe Vector SubtractSaturateReversed(Vector left, byte right); // svqsubr[_n_u8]_m or svqsubr[_n_u8]_x or svqsubr[_n_u8]_z + /// public static unsafe Vector SubtractSaturateReversed(Vector left, ushort right); // svqsubr[_n_u16]_m or svqsubr[_n_u16]_x or svqsubr[_n_u16]_z + /// public static unsafe Vector SubtractSaturateReversed(Vector left, uint right); // svqsubr[_n_u32]_m or svqsubr[_n_u32]_x or svqsubr[_n_u32]_z + /// public static unsafe Vector SubtractSaturateReversed(Vector left, ulong right); // svqsubr[_n_u64]_m or svqsubr[_n_u64]_x or svqsubr[_n_u64]_z + /// public static unsafe Vector SubtractWideLower(Vector left, sbyte right); // svsubwb[_n_s16] +
/// public static unsafe Vector SubtractWideLower(Vector left, short right); // svsubwb[_n_s32] + /// public static unsafe Vector SubtractWideLower(Vector left, int right); // svsubwb[_n_s64] + /// public static unsafe Vector SubtractWideLower(Vector left, byte right); // svsubwb[_n_u16] + /// public static unsafe Vector SubtractWideLower(Vector left, ushort right); // svsubwb[_n_u32] + /// public static unsafe Vector SubtractWideLower(Vector left, uint right); // svsubwb[_n_u64] + /// public static unsafe Vector SubtractWideUpper(Vector left, sbyte right); // svsubwt[_n_s16] + /// public static unsafe Vector SubtractWideUpper(Vector left, short right); // svsubwt[_n_s32] + /// public static unsafe Vector SubtractWideUpper(Vector left, int right); // svsubwt[_n_s64] + /// public static unsafe Vector SubtractWideUpper(Vector left, byte right); // svsubwt[_n_u16] + /// public static unsafe Vector SubtractWideUpper(Vector left, ushort right); // svsubwt[_n_u32] + /// public static unsafe Vector SubtractWideUpper(Vector left, uint right); // svsubwt[_n_u64] + /// public static unsafe Vector SubtractWideningLower(Vector left, sbyte right); // svsublb[_n_s16] + /// public static unsafe Vector SubtractWideningLower(Vector left, short right); // svsublb[_n_s32] + /// public static unsafe Vector SubtractWideningLower(Vector left, int right); // svsublb[_n_s64] + /// public static unsafe Vector SubtractWideningLower(Vector left, byte right); // svsublb[_n_u16] + /// public static unsafe Vector SubtractWideningLower(Vector left, ushort right); // svsublb[_n_u32] + /// public static unsafe Vector SubtractWideningLower(Vector left, uint right); // svsublb[_n_u64] + /// public static unsafe Vector SubtractWideningLowerUpper(Vector left, sbyte right); // svsublbt[_n_s16] + /// public static unsafe Vector SubtractWideningLowerUpper(Vector left, short right); // svsublbt[_n_s32] + /// public static unsafe Vector SubtractWideningLowerUpper(Vector left, int right); // svsublbt[_n_s64] + /// public static unsafe Vector SubtractWideningUpper(Vector left, sbyte right); // svsublt[_n_s16] + /// public static unsafe Vector SubtractWideningUpper(Vector left, short right); // svsublt[_n_s32] + /// public static unsafe Vector SubtractWideningUpper(Vector left, int right); // svsublt[_n_s64] + /// public static unsafe Vector SubtractWideningUpper(Vector left, byte right); // svsublt[_n_u16] + /// public static unsafe Vector SubtractWideningUpper(Vector left, ushort right); // svsublt[_n_u32] + /// public static unsafe Vector SubtractWideningUpper(Vector left, uint right); // svsublt[_n_u64] + /// public static unsafe Vector SubtractWideningUpperLower(Vector left, sbyte right); // svsubltb[_n_s16] + /// public static unsafe Vector SubtractWideningUpperLower(Vector left, short right); // svsubltb[_n_s32] + /// public static unsafe Vector SubtractWideningUpperLower(Vector left, int right); // svsubltb[_n_s64] + /// public static unsafe Vector SubtractWithBorrowWideningLower(Vector op1, Vector op2, uint op3); // svsbclb[_n_u32] + /// public static unsafe Vector SubtractWithBorrowWideningLower(Vector op1, Vector op2, ulong op3); // svsbclb[_n_u64] + /// public static unsafe Vector SubtractWithBorrowWideningUpper(Vector op1, Vector op2, uint op3); // svsbclt[_n_u32] + /// public static unsafe Vector SubtractWithBorrowWideningUpper(Vector op1, Vector op2, ulong op3); // svsbclt[_n_u64] + /// Total Rejected: 294 + + /// Total ACLE covered across API: 1024 +
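A short usage sketch of the saturating and widening arithmetic surface above. This is illustrative only: the `Sve2.SubtractSaturate` and `Sve2.SubtractWideningLower` calls come from the listing, while the wrapper class, method names, and the `Sve2.IsSupported` guard are assumptions, and `Vector<T>` stands for the sizeless SVE vector the proposal abbreviates as `Vector`.

using System;
using System.Numerics;
using System.Runtime.Intrinsics.Arm;

static class Sve2ArithmeticSketch
{
    // UQSUB: unsigned lanes clamp at 0 instead of wrapping, so
    // (byte)10 - (byte)25 yields 0 rather than 241.
    public static Vector<byte> ClampedDelta(Vector<byte> newer, Vector<byte> older)
    {
        if (!Sve2.IsSupported)
            throw new PlatformNotSupportedException();
        return Sve2.SubtractSaturate(newer, older);
    }

    // SSUBLB: the even ("bottom") sbyte lanes are widened to short before
    // subtracting, so the difference cannot overflow the result lane.
    public static Vector<short> WideDeltaLower(Vector<sbyte> left, Vector<sbyte> right)
    {
        if (!Sve2.IsSupported)
            throw new PlatformNotSupportedException();
        return Sve2.SubtractWideningLower(left, right);
    }
}

The saturating overloads are what distinguish this family from plain subtraction: out-of-range lane results clamp to the element type's minimum or maximum rather than wrapping.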
diff --git a/sve_api/out_api/apiraw_FEAT_SVE2__scatterstores.cs b/sve_api/out_api/apiraw_FEAT_SVE2__scatterstores.cs new file mode 100644 index 0000000000000..e526662a7a6fd --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE2__scatterstores.cs @@ -0,0 +1,316 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve2 : AdvSimd /// Feature: FEAT_SVE2 Category: scatterstores +{ + + /// T: [int, uint], [long, ulong] + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); // STNT1H + + /// T: uint, ulong + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); // STNT1H + + /// T: [int, uint], [long, ulong] + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); // STNT1H + + /// T: uint, ulong + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); // STNT1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); // STNT1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); // STNT1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data); // STNT1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data); // STNT1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data); // STNT1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data); // STNT1H + + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data); // STNT1W + + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data); // STNT1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data); // STNT1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data); // STNT1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data); // STNT1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data); // STNT1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data); // STNT1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data); // STNT1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data); // STNT1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data); // STNT1W + + /// T: [int, uint], [long, ulong] + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); // STNT1B + + /// T: uint, ulong + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); // STNT1B + + /// T: [int, uint], [long, ulong] + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); // STNT1B + + /// T: uint, ulong + public static unsafe
void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); // STNT1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); // STNT1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); // STNT1B + + /// T: [float, uint], [int, uint], [double, ulong], [long, ulong] + public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data); // STNT1W or STNT1D + + /// T: uint, ulong + public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data); // STNT1W or STNT1D + + /// T: [float, uint], [int, uint], [double, long], [ulong, long], [double, ulong], [long, ulong] + public static unsafe void ScatterNonTemporal(Vector mask, T* base, Vector offsets, Vector data); // STNT1W or STNT1D + + /// T: uint, long, ulong + public static unsafe void ScatterNonTemporal(Vector mask, T* base, Vector offsets, Vector data); // STNT1W or STNT1D + + /// T: [double, long], [ulong, long], [double, ulong], [long, ulong] + public static unsafe void ScatterNonTemporal(Vector mask, T* base, Vector indices, Vector data); // STNT1D + + /// T: long, ulong + public static unsafe void ScatterNonTemporal(Vector mask, T* base, Vector indices, Vector data); // STNT1D + + /// total method signatures: 32 + +} + +
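Before the full per-overload listing below, a hedged sketch of how the summarized scatter surface is meant to be called. Only the `Scatter32BitWithByteOffsetsNarrowing` signature is taken from the summary above; `Sve2ScatterSketch`, `StoreLow32`, and `basePtr` are illustrative names, not generated output.

using System;
using System.Numerics;
using System.Runtime.Intrinsics.Arm;

static unsafe class Sve2ScatterSketch
{
    // STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]: the low 32 bits of each
    // active long lane of `data` are stored at basePtr + offsets[i] bytes;
    // lanes whose mask element is zero are not stored.
    public static void StoreLow32(Vector<long> mask, int* basePtr,
                                  Vector<long> offsets, Vector<long> data)
    {
        if (!Sve2.IsSupported)
            throw new PlatformNotSupportedException();
        Sve2.Scatter32BitWithByteOffsetsNarrowing(mask, basePtr, offsets, data);
    }
}

Note that these stores are non-temporal (STNT1*), i.e. hinted to bypass the cache hierarchy, which suits large scatter patterns whose destinations will not be re-read soon.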
data) : "STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); + + /// void svstnt1h_scatter_[u64]offset[_s64](svbool_t pg, int16_t *base, svuint64_t offsets, svint64_t data) : "STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); + + /// void svstnt1h_scatter_[u64]offset[_u64](svbool_t pg, uint16_t *base, svuint64_t offsets, svuint64_t data) : "STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); + + /// void svstnt1h_scatter_[s64]index[_s64](svbool_t pg, int16_t *base, svint64_t indices, svint64_t data) : "STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data); + + /// void svstnt1h_scatter_[s64]index[_u64](svbool_t pg, uint16_t *base, svint64_t indices, svuint64_t data) : "STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data); + + /// void svstnt1h_scatter_[u64]index[_s64](svbool_t pg, int16_t *base, svuint64_t indices, svint64_t data) : "STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data); + + /// void svstnt1h_scatter_[u64]index[_u64](svbool_t pg, uint16_t *base, svuint64_t indices, svuint64_t data) : "STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data); + + + /// Scatter32BitNarrowing : Truncate to 32 bits and store, non-temporal + + /// void svstnt1w_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) : "STNT1W Zdata.D, Pg, [Zbases.D, XZR]" + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data); + + /// void svstnt1w_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) : "STNT1W Zdata.D, Pg, [Zbases.D, XZR]" + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data); + + + /// Scatter32BitWithByteOffsetsNarrowing : Truncate to 32 bits and store, non-temporal + + /// void svstnt1w_scatter_[s64]offset[_s64](svbool_t pg, int32_t *base, svint64_t offsets, svint64_t data) : "STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data); + + /// void svstnt1w_scatter_[s64]offset[_u64](svbool_t pg, uint32_t *base, svint64_t offsets, svuint64_t data) : "STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data); + + /// void svstnt1w_scatter_[u64]offset[_s64](svbool_t pg, int32_t *base, svuint64_t offsets, svint64_t data) : "STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data); + + /// void svstnt1w_scatter_[u64]offset[_u64](svbool_t pg, uint32_t *base, svuint64_t offsets, svuint64_t data) : "STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, 
uint* address, Vector offsets, Vector data); + + /// void svstnt1w_scatter_[s64]index[_s64](svbool_t pg, int32_t *base, svint64_t indices, svint64_t data) : "STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data); + + /// void svstnt1w_scatter_[s64]index[_u64](svbool_t pg, uint32_t *base, svint64_t indices, svuint64_t data) : "STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data); + + /// void svstnt1w_scatter_[u64]index[_s64](svbool_t pg, int32_t *base, svuint64_t indices, svint64_t data) : "STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data); + + /// void svstnt1w_scatter_[u64]index[_u64](svbool_t pg, uint32_t *base, svuint64_t indices, svuint64_t data) : "STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data); + + + /// Scatter8BitNarrowing : Truncate to 8 bits and store, non-temporal + + /// void svstnt1b_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) : "STNT1B Zdata.S, Pg, [Zbases.S, XZR]" + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); + + /// void svstnt1b_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) : "STNT1B Zdata.S, Pg, [Zbases.S, XZR]" + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); + + /// void svstnt1b_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) : "STNT1B Zdata.D, Pg, [Zbases.D, XZR]" + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); + + /// void svstnt1b_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) : "STNT1B Zdata.D, Pg, [Zbases.D, XZR]" + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); + + + /// Scatter8BitWithByteOffsetsNarrowing : Truncate to 8 bits and store, non-temporal + + /// void svstnt1b_scatter_[u32]offset[_s32](svbool_t pg, int8_t *base, svuint32_t offsets, svint32_t data) : "STNT1B Zdata.S, Pg, [Zoffsets.S, Xbase]" + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); + + /// void svstnt1b_scatter_[u32]offset[_u32](svbool_t pg, uint8_t *base, svuint32_t offsets, svuint32_t data) : "STNT1B Zdata.S, Pg, [Zoffsets.S, Xbase]" + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); + + /// void svstnt1b_scatter_[s64]offset[_s64](svbool_t pg, int8_t *base, svint64_t offsets, svint64_t data) : "STNT1B Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); + + /// void svstnt1b_scatter_[s64]offset[_u64](svbool_t pg, uint8_t *base, svint64_t offsets, svuint64_t data) : "STNT1B Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); + + /// void svstnt1b_scatter_[u64]offset[_s64](svbool_t pg, int8_t *base, svuint64_t offsets, svint64_t data) : "STNT1B Zdata.D, Pg, [Zoffsets.D, Xbase]" + public static unsafe void 
Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data);
+
+ /// void svstnt1b_scatter_[u64]offset[_u64](svbool_t pg, uint8_t *base, svuint64_t offsets, svuint64_t data) : "STNT1B Zdata.D, Pg, [Zoffsets.D, Xbase]"
+ public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data);
+
+
+ /// ScatterNonTemporal : Non-truncating store, non-temporal
+
+ /// void svstnt1_scatter[_u32base_f32](svbool_t pg, svuint32_t bases, svfloat32_t data) : "STNT1W Zdata.S, Pg, [Zbases.S, XZR]"
+ public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data);
+
+ /// void svstnt1_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) : "STNT1W Zdata.S, Pg, [Zbases.S, XZR]"
+ public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data);
+
+ /// void svstnt1_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) : "STNT1W Zdata.S, Pg, [Zbases.S, XZR]"
+ public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data);
+
+ /// void svstnt1_scatter[_u64base_f64](svbool_t pg, svuint64_t bases, svfloat64_t data) : "STNT1D Zdata.D, Pg, [Zbases.D, XZR]"
+ public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data);
+
+ /// void svstnt1_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) : "STNT1D Zdata.D, Pg, [Zbases.D, XZR]"
+ public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data);
+
+ /// void svstnt1_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) : "STNT1D Zdata.D, Pg, [Zbases.D, XZR]"
+ public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data);
+
+ /// void svstnt1_scatter_[u32]offset[_f32](svbool_t pg, float32_t *base, svuint32_t offsets, svfloat32_t data) : "STNT1W Zdata.S, Pg, [Zoffsets.S, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, float* address, Vector offsets, Vector data);
+
+ /// void svstnt1_scatter_[u32]offset[_s32](svbool_t pg, int32_t *base, svuint32_t offsets, svint32_t data) : "STNT1W Zdata.S, Pg, [Zoffsets.S, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, int* address, Vector offsets, Vector data);
+
+ /// void svstnt1_scatter_[u32]offset[_u32](svbool_t pg, uint32_t *base, svuint32_t offsets, svuint32_t data) : "STNT1W Zdata.S, Pg, [Zoffsets.S, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, uint* address, Vector offsets, Vector data);
+
+ /// void svstnt1_scatter_[s64]offset[_f64](svbool_t pg, float64_t *base, svint64_t offsets, svfloat64_t data) : "STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, double* address, Vector offsets, Vector data);
+
+ /// void svstnt1_scatter_[s64]offset[_s64](svbool_t pg, int64_t *base, svint64_t offsets, svint64_t data) : "STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, long* address, Vector offsets, Vector data);
+
+ /// void svstnt1_scatter_[s64]offset[_u64](svbool_t pg, uint64_t *base, svint64_t offsets, svuint64_t data) : "STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, ulong* address, Vector offsets, Vector data);
+
+ /// void svstnt1_scatter_[u64]offset[_f64](svbool_t pg, float64_t *base, svuint64_t offsets, svfloat64_t data) : "STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, double* address, Vector offsets, Vector data);
+
+ /// void svstnt1_scatter_[u64]offset[_s64](svbool_t pg, int64_t *base, svuint64_t offsets, svint64_t data) : "STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, long* address, Vector offsets, Vector data);
+
+ /// void svstnt1_scatter_[u64]offset[_u64](svbool_t pg, uint64_t *base, svuint64_t offsets, svuint64_t data) : "STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, ulong* address, Vector offsets, Vector data);
+
+ /// void svstnt1_scatter_[s64]index[_f64](svbool_t pg, float64_t *base, svint64_t indices, svfloat64_t data) : "STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, double* address, Vector indices, Vector data);
+
+ /// void svstnt1_scatter_[s64]index[_s64](svbool_t pg, int64_t *base, svint64_t indices, svint64_t data) : "STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, long* address, Vector indices, Vector data);
+
+ /// void svstnt1_scatter_[s64]index[_u64](svbool_t pg, uint64_t *base, svint64_t indices, svuint64_t data) : "STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, ulong* address, Vector indices, Vector data);
+
+ /// void svstnt1_scatter_[u64]index[_f64](svbool_t pg, float64_t *base, svuint64_t indices, svfloat64_t data) : "STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, double* address, Vector indices, Vector data);
+
+ /// void svstnt1_scatter_[u64]index[_s64](svbool_t pg, int64_t *base, svuint64_t indices, svint64_t data) : "STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, long* address, Vector indices, Vector data);
+
+ /// void svstnt1_scatter_[u64]index[_u64](svbool_t pg, uint64_t *base, svuint64_t indices, svuint64_t data) : "STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]"
+ public static unsafe void ScatterNonTemporal(Vector mask, ulong* address, Vector indices, Vector data);
+
+
+ /// total method signatures: 55
+ /// total method names: 7
+}
+
+
+ /// Rejected:
+ /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svstnt1h_scatter[_u32base]_offset[_s32]
+ /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svstnt1h_scatter[_u32base]_offset[_u32]
+ /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svstnt1h_scatter[_u64base]_offset[_s64]
+ /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svstnt1h_scatter[_u64base]_offset[_u64]
+ /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long index, Vector data); // svstnt1h_scatter[_u32base]_index[_s32]
+ /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long index, Vector data); // svstnt1h_scatter[_u32base]_index[_u32]
+ /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long index, Vector data); // svstnt1h_scatter[_u64base]_index[_s64]
+ /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long index, Vector data); // svstnt1h_scatter[_u64base]_index[_u64]
+ /// public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svstnt1w_scatter[_u64base]_offset[_s64]
+ /// public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svstnt1w_scatter[_u64base]_offset[_u64]
+ /// public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, Vector address, long index, Vector data); // svstnt1w_scatter[_u64base]_index[_s64]
+ /// public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, Vector address, long index, Vector data); // svstnt1w_scatter[_u64base]_index[_u64]
+ /// public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svstnt1b_scatter[_u32base]_offset[_s32]
+ /// public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svstnt1b_scatter[_u32base]_offset[_u32]
+ /// public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svstnt1b_scatter[_u64base]_offset[_s64]
+ /// public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svstnt1b_scatter[_u64base]_offset[_u64]
+ /// public static unsafe void ScatterNonTemporal(Vector mask, Vector bases, long offset, Vector data); // svstnt1_scatter[_u32base]_offset[_f32]
+ /// public static unsafe void ScatterNonTemporal(Vector mask, Vector bases, long offset, Vector data); // svstnt1_scatter[_u32base]_offset[_s32]
+ /// public static unsafe void ScatterNonTemporal(Vector mask, Vector bases, long offset, Vector data); // svstnt1_scatter[_u32base]_offset[_u32]
+ /// public static unsafe void ScatterNonTemporal(Vector mask, Vector bases, long offset, Vector data); // svstnt1_scatter[_u64base]_offset[_f64]
+ /// public static unsafe void ScatterNonTemporal(Vector mask, Vector bases, long offset, Vector data); // svstnt1_scatter[_u64base]_offset[_s64]
+ /// public static unsafe void ScatterNonTemporal(Vector mask, Vector bases, long offset, Vector data); // svstnt1_scatter[_u64base]_offset[_u64]
+ /// public static unsafe void ScatterNonTemporal(Vector mask, Vector bases, long index, Vector data); // svstnt1_scatter[_u32base]_index[_f32]
+ /// public static unsafe void ScatterNonTemporal(Vector mask, Vector bases, long index, Vector data); // svstnt1_scatter[_u32base]_index[_s32]
+ /// public static unsafe void ScatterNonTemporal(Vector mask, Vector bases, long index, Vector data); // svstnt1_scatter[_u32base]_index[_u32]
+ /// public static unsafe void ScatterNonTemporal(Vector mask, Vector bases, long index, Vector data); // svstnt1_scatter[_u64base]_index[_f64]
+ /// public static unsafe void ScatterNonTemporal(Vector mask, Vector bases, long index, Vector data); // svstnt1_scatter[_u64base]_index[_s64]
+ /// public static unsafe void ScatterNonTemporal(Vector mask, Vector bases, long index, Vector data); // svstnt1_scatter[_u64base]_index[_u64]
+ /// Total Rejected: 28
+
+ /// Total ACLE covered across API: 83
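To make the scatter-store shapes above concrete, here is a minimal usage sketch. It assumes the fully generic Vector<T> signatures (the generated listing elides the angle brackets), and it guesses the host class as Sve2, since the STNT1 scatter forms are SVE2 instructions; both names are assumptions, not part of the generated output.

using System.Numerics;
using System.Runtime.Intrinsics.Arm;

static unsafe class ScatterSketch
{
    // Stores data[i] to (byte*)address + offsets[i] for every active lane of
    // 'mask'; inactive lanes write nothing. The non-temporal hint asks the
    // core to bypass the caches, which suits large streaming writes.
    public static void Store(Vector<int> mask, int* address,
                             Vector<uint> offsets, Vector<int> data)
    {
        if (Sve2.IsSupported) // hypothetical class name
        {
            Sve2.ScatterNonTemporal(mask, address, offsets, data);
        }
    }
}

diff --git a/sve_api/out_api/apiraw_FEAT_SVE_AES__.cs b/sve_api/out_api/apiraw_FEAT_SVE_AES__.cs
new file mode 100644
index 0000000000000..2e2aaec2a82ac
--- /dev/null
+++ b/sve_api/out_api/apiraw_FEAT_SVE_AES__.cs
@@ -0,0 +1,82 @@
+namespace System.Runtime.Intrinsics.Arm;
+
+/// VectorT Summary
+public abstract partial class SveAes : AdvSimd /// Feature: FEAT_SVE_AES
+{
+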
+ public static unsafe Vector AesInverseMixColumns(Vector value); // AESIMC
+
+ public static unsafe Vector AesMixColumns(Vector value); // AESMC
+
+ public static unsafe Vector AesSingleRoundDecryption(Vector left, Vector right); // AESD
+
+ public static unsafe Vector AesSingleRoundEncryption(Vector left, Vector right); // AESE
+
+ public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, Vector right); // PMULLB
+
+ public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, Vector right); // PMULLT
+
+ /// total method signatures: 6
+
+
+ /// Optional Entries:
+
+ public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, ulong right); // PMULLB
+
+ public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, ulong right); // PMULLT
+
+ /// total optional method signatures: 2
+
+}
+
+
+/// Full API
+public abstract partial class SveAes : AdvSimd /// Feature: FEAT_SVE_AES
+{
+ /// AesInverseMixColumns : AES inverse mix columns
+
+ /// svuint8_t svaesimc[_u8](svuint8_t op) : "AESIMC Ztied.B, Ztied.B"
+ public static unsafe Vector AesInverseMixColumns(Vector value);
+
+
+ /// AesMixColumns : AES mix columns
+
+ /// svuint8_t svaesmc[_u8](svuint8_t op) : "AESMC Ztied.B, Ztied.B"
+ public static unsafe Vector AesMixColumns(Vector value);
+
+
+ /// AesSingleRoundDecryption : AES single round decryption
+
+ /// svuint8_t svaesd[_u8](svuint8_t op1, svuint8_t op2) : "AESD Ztied1.B, Ztied1.B, Zop2.B" or "AESD Ztied2.B, Ztied2.B, Zop1.B"
+ public static unsafe Vector AesSingleRoundDecryption(Vector left, Vector right);
+
+
+ /// AesSingleRoundEncryption : AES single round encryption
+
+ /// svuint8_t svaese[_u8](svuint8_t op1, svuint8_t op2) : "AESE Ztied1.B, Ztied1.B, Zop2.B" or "AESE Ztied2.B, Ztied2.B, Zop1.B"
+ public static unsafe Vector AesSingleRoundEncryption(Vector left, Vector right);
+
+
+ /// PolynomialMultiplyWideningLower : Polynomial multiply long (bottom)
+
+ /// svuint64_t svpmullb_pair[_u64](svuint64_t op1, svuint64_t op2) : "PMULLB Zresult.Q, Zop1.D, Zop2.D"
+ public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, Vector right);
+
+
+ /// PolynomialMultiplyWideningUpper : Polynomial multiply long (top)
+
+ /// svuint64_t svpmullt_pair[_u64](svuint64_t op1, svuint64_t op2) : "PMULLT Zresult.Q, Zop1.D, Zop2.D"
+ public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, Vector right);
+
+
+ /// total method signatures: 6
+ /// total method names: 6
+}
+
+ /// Optional Entries:
+ /// public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, ulong right); // svpmullb_pair[_n_u64]
+ /// public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, ulong right); // svpmullt_pair[_n_u64]
+ /// Total Maybe: 2
+
+ /// Total ACLE covered across API: 8
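As with the existing NEON Aes class, AESE pairs with AESMC to build one full AES round. A hedged sketch against the SveAes surface above, with the Vector<byte> type arguments written out (the listing elides them):

using System.Numerics;
using System.Runtime.Intrinsics.Arm;

static class SveAesSketch
{
    // One middle AES-128 encryption round per 128-bit segment of the vector:
    // AESE = AddRoundKey + SubBytes + ShiftRows; AESMC = MixColumns.
    public static Vector<byte> EncryptRound(Vector<byte> state, Vector<byte> roundKey)
        => SveAes.AesMixColumns(SveAes.AesSingleRoundEncryption(state, roundKey));

    // The final round skips MixColumns and XORs the last round key instead,
    // e.g. Sve.Xor(SveAes.AesSingleRoundEncryption(state, key9), key10).
}

diff --git a/sve_api/out_api/apiraw_FEAT_SVE_BitPerm__.cs b/sve_api/out_api/apiraw_FEAT_SVE_BitPerm__.cs
new file mode 100644
index 0000000000000..5d644616bcc3e
--- /dev/null
+++ b/sve_api/out_api/apiraw_FEAT_SVE_BitPerm__.cs
@@ -0,0 +1,103 @@
+namespace System.Runtime.Intrinsics.Arm;
+
+/// VectorT Summary
+public abstract partial class SveBitperm : AdvSimd /// Feature: FEAT_SVE_BitPerm
+{
+
+ /// T: byte, ushort, uint, ulong
+ public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right); // BEXT
+
+ /// T: byte, ushort, uint, ulong
+ public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right); // BGRP
+
+ /// T: byte, ushort, uint, ulong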
+ public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right); // BDEP
+
+ /// total method signatures: 3
+
+
+ /// Optional Entries:
+
+ /// T: byte, ushort, uint, ulong
+ public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, T right); // BEXT
+
+ /// T: byte, ushort, uint, ulong
+ public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, T right); // BGRP
+
+ /// T: byte, ushort, uint, ulong
+ public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, T right); // BDEP
+
+ /// total optional method signatures: 3
+
+}
+
+
+/// Full API
+public abstract partial class SveBitperm : AdvSimd /// Feature: FEAT_SVE_BitPerm
+{
+ /// GatherLowerBitsFromPositionsSelectedByBitmask : Gather lower bits from positions selected by bitmask
+
+ /// svuint8_t svbext[_u8](svuint8_t op1, svuint8_t op2) : "BEXT Zresult.B, Zop1.B, Zop2.B"
+ public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right);
+
+ /// svuint16_t svbext[_u16](svuint16_t op1, svuint16_t op2) : "BEXT Zresult.H, Zop1.H, Zop2.H"
+ public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right);
+
+ /// svuint32_t svbext[_u32](svuint32_t op1, svuint32_t op2) : "BEXT Zresult.S, Zop1.S, Zop2.S"
+ public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right);
+
+ /// svuint64_t svbext[_u64](svuint64_t op1, svuint64_t op2) : "BEXT Zresult.D, Zop1.D, Zop2.D"
+ public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right);
+
+
+ /// GroupBitsToRightOrLeftAsSelectedByBitmask : Group bits to right or left as selected by bitmask
+
+ /// svuint8_t svbgrp[_u8](svuint8_t op1, svuint8_t op2) : "BGRP Zresult.B, Zop1.B, Zop2.B"
+ public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right);
+
+ /// svuint16_t svbgrp[_u16](svuint16_t op1, svuint16_t op2) : "BGRP Zresult.H, Zop1.H, Zop2.H"
+ public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right);
+
+ /// svuint32_t svbgrp[_u32](svuint32_t op1, svuint32_t op2) : "BGRP Zresult.S, Zop1.S, Zop2.S"
+ public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right);
+
+ /// svuint64_t svbgrp[_u64](svuint64_t op1, svuint64_t op2) : "BGRP Zresult.D, Zop1.D, Zop2.D"
+ public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right);
+
+
+ /// ScatterLowerBitsIntoPositionsSelectedByBitmask : Scatter lower bits into positions selected by bitmask
+
+ /// svuint8_t svbdep[_u8](svuint8_t op1, svuint8_t op2) : "BDEP Zresult.B, Zop1.B, Zop2.B"
+ public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right);
+
+ /// svuint16_t svbdep[_u16](svuint16_t op1, svuint16_t op2) : "BDEP Zresult.H, Zop1.H, Zop2.H"
+ public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right);
+
+ /// svuint32_t svbdep[_u32](svuint32_t op1, svuint32_t op2) : "BDEP Zresult.S, Zop1.S, Zop2.S"
+ public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right);
+
+ /// svuint64_t svbdep[_u64](svuint64_t op1, svuint64_t op2) : "BDEP Zresult.D, Zop1.D, Zop2.D"
+ public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right);
+
+
+ /// total method signatures: 12
+ /// total method names: 3
+}
+
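The names are long, but the operations are the familiar bit extract/deposit: per lane, BEXT and BDEP are broadly the SVE analogues of x86 BMI2 PEXT and PDEP. A scalar reference model of one 64-bit lane, as a sketch of the semantics only (not part of the proposed API):

// BEXT: collect the bits of 'value' found at the set positions of 'mask'
// and pack them contiguously into the low bits of the result.
static ulong Bext(ulong value, ulong mask)
{
    ulong result = 0;
    for (int src = 0, dst = 0; src < 64; src++)
        if (((mask >> src) & 1) != 0)
            result |= ((value >> src) & 1) << dst++;
    return result;
}

// BDEP: spread the low bits of 'value' out to the set positions of 'mask'.
static ulong Bdep(ulong value, ulong mask)
{
    ulong result = 0;
    for (int dst = 0, src = 0; dst < 64; dst++)
        if (((mask >> dst) & 1) != 0)
            result |= ((value >> src++) & 1) << dst;
    return result;
}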
+ /// Optional Entries:
+ /// public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, byte right); // svbext[_n_u8]
+ /// public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, ushort right); // svbext[_n_u16]
+ /// public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, uint right); // svbext[_n_u32]
+ /// public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, ulong right); // svbext[_n_u64]
+ /// public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, byte right); // svbgrp[_n_u8]
+ /// public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, ushort right); // svbgrp[_n_u16]
+ /// public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, uint right); // svbgrp[_n_u32]
+ /// public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, ulong right); // svbgrp[_n_u64]
+ /// public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, byte right); // svbdep[_n_u8]
+ /// public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, ushort right); // svbdep[_n_u16]
+ /// public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, uint right); // svbdep[_n_u32]
+ /// public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, ulong right); // svbdep[_n_u64]
+ /// Total Maybe: 12
+
+ /// Total ACLE covered across API: 24
diff --git a/sve_api/out_api/apiraw_FEAT_SVE_SHA3__.cs b/sve_api/out_api/apiraw_FEAT_SVE_SHA3__.cs
new file mode 100644
index 0000000000000..a82b7be5c9ee4
--- /dev/null
+++ b/sve_api/out_api/apiraw_FEAT_SVE_SHA3__.cs
@@ -0,0 +1,33 @@
+namespace System.Runtime.Intrinsics.Arm;
+
+/// VectorT Summary
+public abstract partial class SveSha3 : AdvSimd /// Feature: FEAT_SVE_SHA3
+{
+
+ /// T: long, ulong
+ public static unsafe Vector BitwiseRotateLeftBy1AndXor(Vector left, Vector right); // RAX1
+
+ /// total method signatures: 1
+
+}
+
+
+/// Full API
+public abstract partial class SveSha3 : AdvSimd /// Feature: FEAT_SVE_SHA3
+{
+ /// BitwiseRotateLeftBy1AndXor : Bitwise rotate left by 1 and exclusive OR
+
+ /// svint64_t svrax1[_s64](svint64_t op1, svint64_t op2) : "RAX1 Zresult.D, Zop1.D, Zop2.D"
+ public static unsafe Vector BitwiseRotateLeftBy1AndXor(Vector left, Vector right);
+
+ /// svuint64_t svrax1[_u64](svuint64_t op1, svuint64_t op2) : "RAX1 Zresult.D, Zop1.D, Zop2.D"
+ public static unsafe Vector BitwiseRotateLeftBy1AndXor(Vector left, Vector right);
+
+
+ /// total method signatures: 2
+ /// total method names: 1
+}
+
+
+ /// Total ACLE covered across API: 2
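RAX1 is the rotate-and-XOR helper from the Keccak (SHA-3) theta step; per 64-bit lane it computes op1 ^ rotl(op2, 1). A one-line scalar model of a single lane, for reference only:

using System.Numerics; // BitOperations

static ulong Rax1(ulong op1, ulong op2) => op1 ^ BitOperations.RotateLeft(op2, 1);

diff --git a/sve_api/out_api/apiraw_FEAT_SVE_SM4__.cs b/sve_api/out_api/apiraw_FEAT_SVE_SM4__.cs
new file mode 100644
index 0000000000000..93b6c47b92221
--- /dev/null
+++ b/sve_api/out_api/apiraw_FEAT_SVE_SM4__.cs
@@ -0,0 +1,37 @@
+namespace System.Runtime.Intrinsics.Arm;
+
+/// VectorT Summary
+public abstract partial class SveSm4 : AdvSimd /// Feature: FEAT_SVE_SM4
+{
+
+ public static unsafe Vector Sm4EncryptionAndDecryption(Vector left, Vector right); // SM4E
+
+ public static unsafe Vector Sm4KeyUpdates(Vector left, Vector right); // SM4EKEY
+
+ /// total method signatures: 2
+
+}
+
+
+/// Full API
+public abstract partial class SveSm4 : AdvSimd /// Feature: FEAT_SVE_SM4
+{
+ /// Sm4EncryptionAndDecryption : SM4 encryption and decryption
+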
op2) : "SM4E Ztied1.S, Ztied1.S, Zop2.S" + public static unsafe Vector Sm4EncryptionAndDecryption(Vector left, Vector right); + + + /// Sm4KeyUpdates : SM4 key updates + + /// svuint32_t svsm4ekey[_u32](svuint32_t op1, svuint32_t op2) : "SM4EKEY Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector Sm4KeyUpdates(Vector left, Vector right); + + + /// total method signatures: 2 + /// total method names: 2 +} + + + /// Total ACLE covered across API: 2 + diff --git a/sve_api/out_api/apiraw_FEAT_SVE__.cs b/sve_api/out_api/apiraw_FEAT_SVE__.cs new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/sve_api/out_api/apiraw_FEAT_SVE__bitmanipulate.cs b/sve_api/out_api/apiraw_FEAT_SVE__bitmanipulate.cs new file mode 100644 index 0000000000000..5597a595f873a --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE__bitmanipulate.cs @@ -0,0 +1,579 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: bitmanipulate +{ + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); // DUP or TBL + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ReverseBits(Vector value); // RBIT // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ReverseElement(Vector value); // REV + + /// T: int, long, uint, ulong + public static unsafe Vector ReverseElement16(Vector value); // REVH // predicated, MOVPRFX + + /// T: long, ulong + public static unsafe Vector ReverseElement32(Vector value); // REVW // predicated, MOVPRFX + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector ReverseElement8(Vector value); // REVB // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); // SPLICE // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector TransposeEven(Vector left, Vector right); // TRN1 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector TransposeOdd(Vector left, Vector right); // TRN2 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector UnzipEven(Vector left, Vector right); // UZP1 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector UnzipOdd(Vector left, Vector right); // UZP2 + + /// T: [float, uint], [double, ulong], [sbyte, byte], [short, ushort], [int, uint], [long, ulong] + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); // TBL + + /// T: byte, ushort, uint, ulong + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); // TBL + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ZipHigh(Vector left, Vector right); // ZIP2 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ZipLow(Vector left, Vector right); // ZIP1 + + /// total method signatures: 15 + +} + + +/// Full API +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: bitmanipulate +{ + /// DuplicateSelectedScalarToVector : Broadcast a scalar value + + /// svfloat32_t 
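A hedged sketch of how the permute entries above compose, with explicit Vector<short> type arguments (elided in the listing): UnzipEven/UnzipOdd de-interleave a two-channel stream, and ZipLow/ZipHigh invert the split.

using System.Numerics;
using System.Runtime.Intrinsics.Arm;

static class PermuteSketch
{
    // a and b hold interleaved pairs [L0,R0,L1,R1,...]. UZP1 concatenates the
    // even-numbered elements of the pair a:b (channel L), UZP2 the odd ones (R).
    public static (Vector<short> L, Vector<short> R) Split(Vector<short> a, Vector<short> b)
        => (Sve.UnzipEven(a, b), Sve.UnzipOdd(a, b));

    // ZIP1/ZIP2 interleave the two halves back into [L0,R0,L1,R1,...].
    public static (Vector<short> lo, Vector<short> hi) Merge(Vector<short> l, Vector<short> r)
        => (Sve.ZipLow(l, r), Sve.ZipHigh(l, r));
}

+
+
+/// Full API
+public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: bitmanipulate
+{
+ /// DuplicateSelectedScalarToVector : Broadcast a scalar value
+
+ /// svfloat32_t 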
svdup_lane[_f32](svfloat32_t data, uint32_t index) : "DUP Zresult.S, Zdata.S[index]" or "TBL Zresult.S, Zdata.S, Zindex.S" + /// svfloat32_t svdupq_lane[_f32](svfloat32_t data, uint64_t index) : "DUP Zresult.Q, Zdata.Q[index]" or "TBL Zresult.D, Zdata.D, Zindices_d.D" + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); + + /// svfloat64_t svdup_lane[_f64](svfloat64_t data, uint64_t index) : "DUP Zresult.D, Zdata.D[index]" or "TBL Zresult.D, Zdata.D, Zindex.D" + /// svfloat64_t svdupq_lane[_f64](svfloat64_t data, uint64_t index) : "DUP Zresult.Q, Zdata.Q[index]" or "TBL Zresult.D, Zdata.D, Zindices_d.D" + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); + + /// svint8_t svdup_lane[_s8](svint8_t data, uint8_t index) : "DUP Zresult.B, Zdata.B[index]" or "TBL Zresult.B, Zdata.B, Zindex.B" + /// svint8_t svdupq_lane[_s8](svint8_t data, uint64_t index) : "DUP Zresult.Q, Zdata.Q[index]" or "TBL Zresult.D, Zdata.D, Zindices_d.D" + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); + + /// svint16_t svdup_lane[_s16](svint16_t data, uint16_t index) : "DUP Zresult.H, Zdata.H[index]" or "TBL Zresult.H, Zdata.H, Zindex.H" + /// svint16_t svdupq_lane[_s16](svint16_t data, uint64_t index) : "DUP Zresult.Q, Zdata.Q[index]" or "TBL Zresult.D, Zdata.D, Zindices_d.D" + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); + + /// svint32_t svdup_lane[_s32](svint32_t data, uint32_t index) : "DUP Zresult.S, Zdata.S[index]" or "TBL Zresult.S, Zdata.S, Zindex.S" + /// svint32_t svdupq_lane[_s32](svint32_t data, uint64_t index) : "DUP Zresult.Q, Zdata.Q[index]" or "TBL Zresult.D, Zdata.D, Zindices_d.D" + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); + + /// svint64_t svdup_lane[_s64](svint64_t data, uint64_t index) : "DUP Zresult.D, Zdata.D[index]" or "TBL Zresult.D, Zdata.D, Zindex.D" + /// svint64_t svdupq_lane[_s64](svint64_t data, uint64_t index) : "DUP Zresult.Q, Zdata.Q[index]" or "TBL Zresult.D, Zdata.D, Zindices_d.D" + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); + + /// svuint8_t svdup_lane[_u8](svuint8_t data, uint8_t index) : "DUP Zresult.B, Zdata.B[index]" or "TBL Zresult.B, Zdata.B, Zindex.B" + /// svuint8_t svdupq_lane[_u8](svuint8_t data, uint64_t index) : "DUP Zresult.Q, Zdata.Q[index]" or "TBL Zresult.D, Zdata.D, Zindices_d.D" + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); + + /// svuint16_t svdup_lane[_u16](svuint16_t data, uint16_t index) : "DUP Zresult.H, Zdata.H[index]" or "TBL Zresult.H, Zdata.H, Zindex.H" + /// svuint16_t svdupq_lane[_u16](svuint16_t data, uint64_t index) : "DUP Zresult.Q, Zdata.Q[index]" or "TBL Zresult.D, Zdata.D, Zindices_d.D" + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); + + /// svuint32_t svdup_lane[_u32](svuint32_t data, uint32_t index) : "DUP Zresult.S, Zdata.S[index]" or "TBL Zresult.S, Zdata.S, Zindex.S" + /// svuint32_t svdupq_lane[_u32](svuint32_t data, uint64_t index) : "DUP Zresult.Q, Zdata.Q[index]" or "TBL Zresult.D, Zdata.D, Zindices_d.D" + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); + + /// svuint64_t svdup_lane[_u64](svuint64_t data, uint64_t index) 
: "DUP Zresult.D, Zdata.D[index]" or "TBL Zresult.D, Zdata.D, Zindex.D" + /// svuint64_t svdupq_lane[_u64](svuint64_t data, uint64_t index) : "DUP Zresult.Q, Zdata.Q[index]" or "TBL Zresult.D, Zdata.D, Zindices_d.D" + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); + + + /// ReverseBits : Reverse bits + + /// svint8_t svrbit[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) : "RBIT Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; RBIT Zresult.B, Pg/M, Zop.B" + /// svint8_t svrbit[_s8]_x(svbool_t pg, svint8_t op) : "RBIT Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; RBIT Zresult.B, Pg/M, Zop.B" + /// svint8_t svrbit[_s8]_z(svbool_t pg, svint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; RBIT Zresult.B, Pg/M, Zop.B" + public static unsafe Vector ReverseBits(Vector value); + + /// svint16_t svrbit[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) : "RBIT Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; RBIT Zresult.H, Pg/M, Zop.H" + /// svint16_t svrbit[_s16]_x(svbool_t pg, svint16_t op) : "RBIT Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; RBIT Zresult.H, Pg/M, Zop.H" + /// svint16_t svrbit[_s16]_z(svbool_t pg, svint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; RBIT Zresult.H, Pg/M, Zop.H" + public static unsafe Vector ReverseBits(Vector value); + + /// svint32_t svrbit[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) : "RBIT Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; RBIT Zresult.S, Pg/M, Zop.S" + /// svint32_t svrbit[_s32]_x(svbool_t pg, svint32_t op) : "RBIT Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; RBIT Zresult.S, Pg/M, Zop.S" + /// svint32_t svrbit[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; RBIT Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ReverseBits(Vector value); + + /// svint64_t svrbit[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) : "RBIT Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; RBIT Zresult.D, Pg/M, Zop.D" + /// svint64_t svrbit[_s64]_x(svbool_t pg, svint64_t op) : "RBIT Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; RBIT Zresult.D, Pg/M, Zop.D" + /// svint64_t svrbit[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; RBIT Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ReverseBits(Vector value); + + /// svuint8_t svrbit[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op) : "RBIT Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; RBIT Zresult.B, Pg/M, Zop.B" + /// svuint8_t svrbit[_u8]_x(svbool_t pg, svuint8_t op) : "RBIT Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; RBIT Zresult.B, Pg/M, Zop.B" + /// svuint8_t svrbit[_u8]_z(svbool_t pg, svuint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; RBIT Zresult.B, Pg/M, Zop.B" + public static unsafe Vector ReverseBits(Vector value); + + /// svuint16_t svrbit[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) : "RBIT Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; RBIT Zresult.H, Pg/M, Zop.H" + /// svuint16_t svrbit[_u16]_x(svbool_t pg, svuint16_t op) : "RBIT Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; RBIT Zresult.H, Pg/M, Zop.H" + /// svuint16_t svrbit[_u16]_z(svbool_t pg, svuint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; RBIT Zresult.H, Pg/M, Zop.H" + public static unsafe Vector ReverseBits(Vector value); + + /// svuint32_t svrbit[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) : "RBIT Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; RBIT Zresult.S, Pg/M, Zop.S" + /// svuint32_t svrbit[_u32]_x(svbool_t pg, svuint32_t 
op) : "RBIT Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; RBIT Zresult.S, Pg/M, Zop.S" + /// svuint32_t svrbit[_u32]_z(svbool_t pg, svuint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; RBIT Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ReverseBits(Vector value); + + /// svuint64_t svrbit[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) : "RBIT Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; RBIT Zresult.D, Pg/M, Zop.D" + /// svuint64_t svrbit[_u64]_x(svbool_t pg, svuint64_t op) : "RBIT Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; RBIT Zresult.D, Pg/M, Zop.D" + /// svuint64_t svrbit[_u64]_z(svbool_t pg, svuint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; RBIT Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ReverseBits(Vector value); + + + /// ReverseElement : Reverse all elements + + /// svfloat32_t svrev[_f32](svfloat32_t op) : "REV Zresult.S, Zop.S" + public static unsafe Vector ReverseElement(Vector value); + + /// svfloat64_t svrev[_f64](svfloat64_t op) : "REV Zresult.D, Zop.D" + public static unsafe Vector ReverseElement(Vector value); + + /// svint8_t svrev[_s8](svint8_t op) : "REV Zresult.B, Zop.B" + public static unsafe Vector ReverseElement(Vector value); + + /// svint16_t svrev[_s16](svint16_t op) : "REV Zresult.H, Zop.H" + public static unsafe Vector ReverseElement(Vector value); + + /// svint32_t svrev[_s32](svint32_t op) : "REV Zresult.S, Zop.S" + public static unsafe Vector ReverseElement(Vector value); + + /// svint64_t svrev[_s64](svint64_t op) : "REV Zresult.D, Zop.D" + public static unsafe Vector ReverseElement(Vector value); + + /// svuint8_t svrev[_u8](svuint8_t op) : "REV Zresult.B, Zop.B" + /// svbool_t svrev_b8(svbool_t op) : "REV Presult.B, Pop.B" + public static unsafe Vector ReverseElement(Vector value); + + /// svuint16_t svrev[_u16](svuint16_t op) : "REV Zresult.H, Zop.H" + /// svbool_t svrev_b16(svbool_t op) : "REV Presult.H, Pop.H" + public static unsafe Vector ReverseElement(Vector value); + + /// svuint32_t svrev[_u32](svuint32_t op) : "REV Zresult.S, Zop.S" + /// svbool_t svrev_b32(svbool_t op) : "REV Presult.S, Pop.S" + public static unsafe Vector ReverseElement(Vector value); + + /// svuint64_t svrev[_u64](svuint64_t op) : "REV Zresult.D, Zop.D" + /// svbool_t svrev_b64(svbool_t op) : "REV Presult.D, Pop.D" + public static unsafe Vector ReverseElement(Vector value); + + + /// ReverseElement16 : Reverse halfwords within elements + + /// svint32_t svrevh[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) : "REVH Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; REVH Zresult.S, Pg/M, Zop.S" + /// svint32_t svrevh[_s32]_x(svbool_t pg, svint32_t op) : "REVH Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; REVH Zresult.S, Pg/M, Zop.S" + /// svint32_t svrevh[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; REVH Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ReverseElement16(Vector value); + + /// svint64_t svrevh[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) : "REVH Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; REVH Zresult.D, Pg/M, Zop.D" + /// svint64_t svrevh[_s64]_x(svbool_t pg, svint64_t op) : "REVH Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; REVH Zresult.D, Pg/M, Zop.D" + /// svint64_t svrevh[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; REVH Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ReverseElement16(Vector value); + + /// svuint32_t svrevh[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) : "REVH Ztied.S, Pg/M, Zop.S" or 
"MOVPRFX Zresult, Zinactive; REVH Zresult.S, Pg/M, Zop.S" + /// svuint32_t svrevh[_u32]_x(svbool_t pg, svuint32_t op) : "REVH Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; REVH Zresult.S, Pg/M, Zop.S" + /// svuint32_t svrevh[_u32]_z(svbool_t pg, svuint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; REVH Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ReverseElement16(Vector value); + + /// svuint64_t svrevh[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) : "REVH Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; REVH Zresult.D, Pg/M, Zop.D" + /// svuint64_t svrevh[_u64]_x(svbool_t pg, svuint64_t op) : "REVH Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; REVH Zresult.D, Pg/M, Zop.D" + /// svuint64_t svrevh[_u64]_z(svbool_t pg, svuint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; REVH Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ReverseElement16(Vector value); + + + /// ReverseElement32 : Reverse words within elements + + /// svint64_t svrevw[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) : "REVW Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; REVW Zresult.D, Pg/M, Zop.D" + /// svint64_t svrevw[_s64]_x(svbool_t pg, svint64_t op) : "REVW Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; REVW Zresult.D, Pg/M, Zop.D" + /// svint64_t svrevw[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; REVW Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ReverseElement32(Vector value); + + /// svuint64_t svrevw[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) : "REVW Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; REVW Zresult.D, Pg/M, Zop.D" + /// svuint64_t svrevw[_u64]_x(svbool_t pg, svuint64_t op) : "REVW Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; REVW Zresult.D, Pg/M, Zop.D" + /// svuint64_t svrevw[_u64]_z(svbool_t pg, svuint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; REVW Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ReverseElement32(Vector value); + + + /// ReverseElement8 : Reverse bytes within elements + + /// svint16_t svrevb[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) : "REVB Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; REVB Zresult.H, Pg/M, Zop.H" + /// svint16_t svrevb[_s16]_x(svbool_t pg, svint16_t op) : "REVB Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; REVB Zresult.H, Pg/M, Zop.H" + /// svint16_t svrevb[_s16]_z(svbool_t pg, svint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; REVB Zresult.H, Pg/M, Zop.H" + public static unsafe Vector ReverseElement8(Vector value); + + /// svint32_t svrevb[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) : "REVB Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; REVB Zresult.S, Pg/M, Zop.S" + /// svint32_t svrevb[_s32]_x(svbool_t pg, svint32_t op) : "REVB Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; REVB Zresult.S, Pg/M, Zop.S" + /// svint32_t svrevb[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; REVB Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ReverseElement8(Vector value); + + /// svint64_t svrevb[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) : "REVB Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; REVB Zresult.D, Pg/M, Zop.D" + /// svint64_t svrevb[_s64]_x(svbool_t pg, svint64_t op) : "REVB Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; REVB Zresult.D, Pg/M, Zop.D" + /// svint64_t svrevb[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; REVB Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ReverseElement8(Vector value); + + /// svuint16_t 
svrevb[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) : "REVB Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; REVB Zresult.H, Pg/M, Zop.H" + /// svuint16_t svrevb[_u16]_x(svbool_t pg, svuint16_t op) : "REVB Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; REVB Zresult.H, Pg/M, Zop.H" + /// svuint16_t svrevb[_u16]_z(svbool_t pg, svuint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; REVB Zresult.H, Pg/M, Zop.H" + public static unsafe Vector ReverseElement8(Vector value); + + /// svuint32_t svrevb[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) : "REVB Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; REVB Zresult.S, Pg/M, Zop.S" + /// svuint32_t svrevb[_u32]_x(svbool_t pg, svuint32_t op) : "REVB Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; REVB Zresult.S, Pg/M, Zop.S" + /// svuint32_t svrevb[_u32]_z(svbool_t pg, svuint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; REVB Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ReverseElement8(Vector value); + + /// svuint64_t svrevb[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) : "REVB Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; REVB Zresult.D, Pg/M, Zop.D" + /// svuint64_t svrevb[_u64]_x(svbool_t pg, svuint64_t op) : "REVB Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; REVB Zresult.D, Pg/M, Zop.D" + /// svuint64_t svrevb[_u64]_z(svbool_t pg, svuint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; REVB Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ReverseElement8(Vector value); + + + /// Splice : Splice two vectors under predicate control + + /// svfloat32_t svsplice[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "SPLICE Ztied1.S, Pg, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SPLICE Zresult.S, Pg, Zresult.S, Zop2.S" + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); + + /// svfloat64_t svsplice[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "SPLICE Ztied1.D, Pg, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SPLICE Zresult.D, Pg, Zresult.D, Zop2.D" + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); + + /// svint8_t svsplice[_s8](svbool_t pg, svint8_t op1, svint8_t op2) : "SPLICE Ztied1.B, Pg, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SPLICE Zresult.B, Pg, Zresult.B, Zop2.B" + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); + + /// svint16_t svsplice[_s16](svbool_t pg, svint16_t op1, svint16_t op2) : "SPLICE Ztied1.H, Pg, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SPLICE Zresult.H, Pg, Zresult.H, Zop2.H" + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); + + /// svint32_t svsplice[_s32](svbool_t pg, svint32_t op1, svint32_t op2) : "SPLICE Ztied1.S, Pg, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SPLICE Zresult.S, Pg, Zresult.S, Zop2.S" + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); + + /// svint64_t svsplice[_s64](svbool_t pg, svint64_t op1, svint64_t op2) : "SPLICE Ztied1.D, Pg, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SPLICE Zresult.D, Pg, Zresult.D, Zop2.D" + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); + + /// svuint8_t svsplice[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) : "SPLICE Ztied1.B, Pg, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SPLICE Zresult.B, Pg, Zresult.B, Zop2.B" + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); + + /// svuint16_t svsplice[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) : "SPLICE Ztied1.H, Pg, Ztied1.H, Zop2.H" or "MOVPRFX 
Zresult, Zop1; SPLICE Zresult.H, Pg, Zresult.H, Zop2.H" + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); + + /// svuint32_t svsplice[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) : "SPLICE Ztied1.S, Pg, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SPLICE Zresult.S, Pg, Zresult.S, Zop2.S" + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); + + /// svuint64_t svsplice[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) : "SPLICE Ztied1.D, Pg, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SPLICE Zresult.D, Pg, Zresult.D, Zop2.D" + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); + + + /// TransposeEven : Interleave even elements from two inputs + + /// svfloat32_t svtrn1[_f32](svfloat32_t op1, svfloat32_t op2) : "TRN1 Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector TransposeEven(Vector left, Vector right); + + /// svfloat64_t svtrn1[_f64](svfloat64_t op1, svfloat64_t op2) : "TRN1 Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector TransposeEven(Vector left, Vector right); + + /// svint8_t svtrn1[_s8](svint8_t op1, svint8_t op2) : "TRN1 Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector TransposeEven(Vector left, Vector right); + + /// svint16_t svtrn1[_s16](svint16_t op1, svint16_t op2) : "TRN1 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector TransposeEven(Vector left, Vector right); + + /// svint32_t svtrn1[_s32](svint32_t op1, svint32_t op2) : "TRN1 Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector TransposeEven(Vector left, Vector right); + + /// svint64_t svtrn1[_s64](svint64_t op1, svint64_t op2) : "TRN1 Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector TransposeEven(Vector left, Vector right); + + /// svuint8_t svtrn1[_u8](svuint8_t op1, svuint8_t op2) : "TRN1 Zresult.B, Zop1.B, Zop2.B" + /// svbool_t svtrn1_b8(svbool_t op1, svbool_t op2) : "TRN1 Presult.B, Pop1.B, Pop2.B" + public static unsafe Vector TransposeEven(Vector left, Vector right); + + /// svuint16_t svtrn1[_u16](svuint16_t op1, svuint16_t op2) : "TRN1 Zresult.H, Zop1.H, Zop2.H" + /// svbool_t svtrn1_b16(svbool_t op1, svbool_t op2) : "TRN1 Presult.H, Pop1.H, Pop2.H" + public static unsafe Vector TransposeEven(Vector left, Vector right); + + /// svuint32_t svtrn1[_u32](svuint32_t op1, svuint32_t op2) : "TRN1 Zresult.S, Zop1.S, Zop2.S" + /// svbool_t svtrn1_b32(svbool_t op1, svbool_t op2) : "TRN1 Presult.S, Pop1.S, Pop2.S" + public static unsafe Vector TransposeEven(Vector left, Vector right); + + /// svuint64_t svtrn1[_u64](svuint64_t op1, svuint64_t op2) : "TRN1 Zresult.D, Zop1.D, Zop2.D" + /// svbool_t svtrn1_b64(svbool_t op1, svbool_t op2) : "TRN1 Presult.D, Pop1.D, Pop2.D" + public static unsafe Vector TransposeEven(Vector left, Vector right); + + + /// TransposeOdd : Interleave odd elements from two inputs + + /// svfloat32_t svtrn2[_f32](svfloat32_t op1, svfloat32_t op2) : "TRN2 Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector TransposeOdd(Vector left, Vector right); + + /// svfloat64_t svtrn2[_f64](svfloat64_t op1, svfloat64_t op2) : "TRN2 Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector TransposeOdd(Vector left, Vector right); + + /// svint8_t svtrn2[_s8](svint8_t op1, svint8_t op2) : "TRN2 Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector TransposeOdd(Vector left, Vector right); + + /// svint16_t svtrn2[_s16](svint16_t op1, svint16_t op2) : "TRN2 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector TransposeOdd(Vector left, Vector right); + + /// svint32_t 
svtrn2[_s32](svint32_t op1, svint32_t op2) : "TRN2 Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector TransposeOdd(Vector left, Vector right); + + /// svint64_t svtrn2[_s64](svint64_t op1, svint64_t op2) : "TRN2 Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector TransposeOdd(Vector left, Vector right); + + /// svuint8_t svtrn2[_u8](svuint8_t op1, svuint8_t op2) : "TRN2 Zresult.B, Zop1.B, Zop2.B" + /// svbool_t svtrn2_b8(svbool_t op1, svbool_t op2) : "TRN2 Presult.B, Pop1.B, Pop2.B" + public static unsafe Vector TransposeOdd(Vector left, Vector right); + + /// svuint16_t svtrn2[_u16](svuint16_t op1, svuint16_t op2) : "TRN2 Zresult.H, Zop1.H, Zop2.H" + /// svbool_t svtrn2_b16(svbool_t op1, svbool_t op2) : "TRN2 Presult.H, Pop1.H, Pop2.H" + public static unsafe Vector TransposeOdd(Vector left, Vector right); + + /// svuint32_t svtrn2[_u32](svuint32_t op1, svuint32_t op2) : "TRN2 Zresult.S, Zop1.S, Zop2.S" + /// svbool_t svtrn2_b32(svbool_t op1, svbool_t op2) : "TRN2 Presult.S, Pop1.S, Pop2.S" + public static unsafe Vector TransposeOdd(Vector left, Vector right); + + /// svuint64_t svtrn2[_u64](svuint64_t op1, svuint64_t op2) : "TRN2 Zresult.D, Zop1.D, Zop2.D" + /// svbool_t svtrn2_b64(svbool_t op1, svbool_t op2) : "TRN2 Presult.D, Pop1.D, Pop2.D" + public static unsafe Vector TransposeOdd(Vector left, Vector right); + + + /// UnzipEven : Concatenate even elements from two inputs + + /// svfloat32_t svuzp1[_f32](svfloat32_t op1, svfloat32_t op2) : "UZP1 Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector UnzipEven(Vector left, Vector right); + + /// svfloat64_t svuzp1[_f64](svfloat64_t op1, svfloat64_t op2) : "UZP1 Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector UnzipEven(Vector left, Vector right); + + /// svint8_t svuzp1[_s8](svint8_t op1, svint8_t op2) : "UZP1 Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector UnzipEven(Vector left, Vector right); + + /// svint16_t svuzp1[_s16](svint16_t op1, svint16_t op2) : "UZP1 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector UnzipEven(Vector left, Vector right); + + /// svint32_t svuzp1[_s32](svint32_t op1, svint32_t op2) : "UZP1 Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector UnzipEven(Vector left, Vector right); + + /// svint64_t svuzp1[_s64](svint64_t op1, svint64_t op2) : "UZP1 Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector UnzipEven(Vector left, Vector right); + + /// svuint8_t svuzp1[_u8](svuint8_t op1, svuint8_t op2) : "UZP1 Zresult.B, Zop1.B, Zop2.B" + /// svbool_t svuzp1_b8(svbool_t op1, svbool_t op2) : "UZP1 Presult.B, Pop1.B, Pop2.B" + public static unsafe Vector UnzipEven(Vector left, Vector right); + + /// svuint16_t svuzp1[_u16](svuint16_t op1, svuint16_t op2) : "UZP1 Zresult.H, Zop1.H, Zop2.H" + /// svbool_t svuzp1_b16(svbool_t op1, svbool_t op2) : "UZP1 Presult.H, Pop1.H, Pop2.H" + public static unsafe Vector UnzipEven(Vector left, Vector right); + + /// svuint32_t svuzp1[_u32](svuint32_t op1, svuint32_t op2) : "UZP1 Zresult.S, Zop1.S, Zop2.S" + /// svbool_t svuzp1_b32(svbool_t op1, svbool_t op2) : "UZP1 Presult.S, Pop1.S, Pop2.S" + public static unsafe Vector UnzipEven(Vector left, Vector right); + + /// svuint64_t svuzp1[_u64](svuint64_t op1, svuint64_t op2) : "UZP1 Zresult.D, Zop1.D, Zop2.D" + /// svbool_t svuzp1_b64(svbool_t op1, svbool_t op2) : "UZP1 Presult.D, Pop1.D, Pop2.D" + public static unsafe Vector UnzipEven(Vector left, Vector right); + + + /// UnzipOdd : Concatenate odd elements from two inputs + + /// svfloat32_t svuzp2[_f32](svfloat32_t op1, svfloat32_t op2) : 
"UZP2 Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector UnzipOdd(Vector left, Vector right); + + /// svfloat64_t svuzp2[_f64](svfloat64_t op1, svfloat64_t op2) : "UZP2 Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector UnzipOdd(Vector left, Vector right); + + /// svint8_t svuzp2[_s8](svint8_t op1, svint8_t op2) : "UZP2 Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector UnzipOdd(Vector left, Vector right); + + /// svint16_t svuzp2[_s16](svint16_t op1, svint16_t op2) : "UZP2 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector UnzipOdd(Vector left, Vector right); + + /// svint32_t svuzp2[_s32](svint32_t op1, svint32_t op2) : "UZP2 Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector UnzipOdd(Vector left, Vector right); + + /// svint64_t svuzp2[_s64](svint64_t op1, svint64_t op2) : "UZP2 Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector UnzipOdd(Vector left, Vector right); + + /// svuint8_t svuzp2[_u8](svuint8_t op1, svuint8_t op2) : "UZP2 Zresult.B, Zop1.B, Zop2.B" + /// svbool_t svuzp2_b8(svbool_t op1, svbool_t op2) : "UZP2 Presult.B, Pop1.B, Pop2.B" + public static unsafe Vector UnzipOdd(Vector left, Vector right); + + /// svuint16_t svuzp2[_u16](svuint16_t op1, svuint16_t op2) : "UZP2 Zresult.H, Zop1.H, Zop2.H" + /// svbool_t svuzp2_b16(svbool_t op1, svbool_t op2) : "UZP2 Presult.H, Pop1.H, Pop2.H" + public static unsafe Vector UnzipOdd(Vector left, Vector right); + + /// svuint32_t svuzp2[_u32](svuint32_t op1, svuint32_t op2) : "UZP2 Zresult.S, Zop1.S, Zop2.S" + /// svbool_t svuzp2_b32(svbool_t op1, svbool_t op2) : "UZP2 Presult.S, Pop1.S, Pop2.S" + public static unsafe Vector UnzipOdd(Vector left, Vector right); + + /// svuint64_t svuzp2[_u64](svuint64_t op1, svuint64_t op2) : "UZP2 Zresult.D, Zop1.D, Zop2.D" + /// svbool_t svuzp2_b64(svbool_t op1, svbool_t op2) : "UZP2 Presult.D, Pop1.D, Pop2.D" + public static unsafe Vector UnzipOdd(Vector left, Vector right); + + + /// VectorTableLookup : Table lookup in single-vector table + + /// svfloat32_t svtbl[_f32](svfloat32_t data, svuint32_t indices) : "TBL Zresult.S, Zdata.S, Zindices.S" + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); + + /// svfloat64_t svtbl[_f64](svfloat64_t data, svuint64_t indices) : "TBL Zresult.D, Zdata.D, Zindices.D" + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); + + /// svint8_t svtbl[_s8](svint8_t data, svuint8_t indices) : "TBL Zresult.B, Zdata.B, Zindices.B" + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); + + /// svint16_t svtbl[_s16](svint16_t data, svuint16_t indices) : "TBL Zresult.H, Zdata.H, Zindices.H" + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); + + /// svint32_t svtbl[_s32](svint32_t data, svuint32_t indices) : "TBL Zresult.S, Zdata.S, Zindices.S" + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); + + /// svint64_t svtbl[_s64](svint64_t data, svuint64_t indices) : "TBL Zresult.D, Zdata.D, Zindices.D" + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); + + /// svuint8_t svtbl[_u8](svuint8_t data, svuint8_t indices) : "TBL Zresult.B, Zdata.B, Zindices.B" + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); + + /// svuint16_t svtbl[_u16](svuint16_t data, svuint16_t indices) : "TBL Zresult.H, Zdata.H, Zindices.H" + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); + + /// svuint32_t svtbl[_u32](svuint32_t data, svuint32_t indices) : "TBL Zresult.S, 
Zdata.S, Zindices.S" + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); + + /// svuint64_t svtbl[_u64](svuint64_t data, svuint64_t indices) : "TBL Zresult.D, Zdata.D, Zindices.D" + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); + + + /// ZipHigh : Interleave elements from high halves of two inputs + + /// svfloat32_t svzip2[_f32](svfloat32_t op1, svfloat32_t op2) : "ZIP2 Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector ZipHigh(Vector left, Vector right); + + /// svfloat64_t svzip2[_f64](svfloat64_t op1, svfloat64_t op2) : "ZIP2 Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector ZipHigh(Vector left, Vector right); + + /// svint8_t svzip2[_s8](svint8_t op1, svint8_t op2) : "ZIP2 Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector ZipHigh(Vector left, Vector right); + + /// svint16_t svzip2[_s16](svint16_t op1, svint16_t op2) : "ZIP2 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector ZipHigh(Vector left, Vector right); + + /// svint32_t svzip2[_s32](svint32_t op1, svint32_t op2) : "ZIP2 Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector ZipHigh(Vector left, Vector right); + + /// svint64_t svzip2[_s64](svint64_t op1, svint64_t op2) : "ZIP2 Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector ZipHigh(Vector left, Vector right); + + /// svuint8_t svzip2[_u8](svuint8_t op1, svuint8_t op2) : "ZIP2 Zresult.B, Zop1.B, Zop2.B" + /// svbool_t svzip2_b8(svbool_t op1, svbool_t op2) : "ZIP2 Presult.B, Pop1.B, Pop2.B" + public static unsafe Vector ZipHigh(Vector left, Vector right); + + /// svuint16_t svzip2[_u16](svuint16_t op1, svuint16_t op2) : "ZIP2 Zresult.H, Zop1.H, Zop2.H" + /// svbool_t svzip2_b16(svbool_t op1, svbool_t op2) : "ZIP2 Presult.H, Pop1.H, Pop2.H" + public static unsafe Vector ZipHigh(Vector left, Vector right); + + /// svuint32_t svzip2[_u32](svuint32_t op1, svuint32_t op2) : "ZIP2 Zresult.S, Zop1.S, Zop2.S" + /// svbool_t svzip2_b32(svbool_t op1, svbool_t op2) : "ZIP2 Presult.S, Pop1.S, Pop2.S" + public static unsafe Vector ZipHigh(Vector left, Vector right); + + /// svuint64_t svzip2[_u64](svuint64_t op1, svuint64_t op2) : "ZIP2 Zresult.D, Zop1.D, Zop2.D" + /// svbool_t svzip2_b64(svbool_t op1, svbool_t op2) : "ZIP2 Presult.D, Pop1.D, Pop2.D" + public static unsafe Vector ZipHigh(Vector left, Vector right); + + + /// ZipLow : Interleave elements from low halves of two inputs + + /// svfloat32_t svzip1[_f32](svfloat32_t op1, svfloat32_t op2) : "ZIP1 Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector ZipLow(Vector left, Vector right); + + /// svfloat64_t svzip1[_f64](svfloat64_t op1, svfloat64_t op2) : "ZIP1 Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector ZipLow(Vector left, Vector right); + + /// svint8_t svzip1[_s8](svint8_t op1, svint8_t op2) : "ZIP1 Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector ZipLow(Vector left, Vector right); + + /// svint16_t svzip1[_s16](svint16_t op1, svint16_t op2) : "ZIP1 Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector ZipLow(Vector left, Vector right); + + /// svint32_t svzip1[_s32](svint32_t op1, svint32_t op2) : "ZIP1 Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector ZipLow(Vector left, Vector right); + + /// svint64_t svzip1[_s64](svint64_t op1, svint64_t op2) : "ZIP1 Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector ZipLow(Vector left, Vector right); + + /// svuint8_t svzip1[_u8](svuint8_t op1, svuint8_t op2) : "ZIP1 Zresult.B, Zop1.B, Zop2.B" + /// svbool_t svzip1_b8(svbool_t op1, svbool_t op2) : "ZIP1 
Presult.B, Pop1.B, Pop2.B" + public static unsafe Vector ZipLow(Vector left, Vector right); + + /// svuint16_t svzip1[_u16](svuint16_t op1, svuint16_t op2) : "ZIP1 Zresult.H, Zop1.H, Zop2.H" + /// svbool_t svzip1_b16(svbool_t op1, svbool_t op2) : "ZIP1 Presult.H, Pop1.H, Pop2.H" + public static unsafe Vector ZipLow(Vector left, Vector right); + + /// svuint32_t svzip1[_u32](svuint32_t op1, svuint32_t op2) : "ZIP1 Zresult.S, Zop1.S, Zop2.S" + /// svbool_t svzip1_b32(svbool_t op1, svbool_t op2) : "ZIP1 Presult.S, Pop1.S, Pop2.S" + public static unsafe Vector ZipLow(Vector left, Vector right); + + /// svuint64_t svzip1[_u64](svuint64_t op1, svuint64_t op2) : "ZIP1 Zresult.D, Zop1.D, Zop2.D" + /// svbool_t svzip1_b64(svbool_t op1, svbool_t op2) : "ZIP1 Presult.D, Pop1.D, Pop2.D" + public static unsafe Vector ZipLow(Vector left, Vector right); + + + /// total method signatures: 120 + /// total method names: 16 +} + + + /// Rejected: + /// public static unsafe Vector CreateSeries(sbyte base, sbyte step); // svindex_s8 + /// public static unsafe Vector CreateSeries(short base, short step); // svindex_s16 + /// public static unsafe Vector CreateSeries(int base, int step); // svindex_s32 + /// public static unsafe Vector CreateSeries(long base, long step); // svindex_s64 + /// public static unsafe Vector CreateSeries(byte base, byte step); // svindex_u8 + /// public static unsafe Vector CreateSeries(ushort base, ushort step); // svindex_u16 + /// public static unsafe Vector CreateSeries(uint base, uint step); // svindex_u32 + /// public static unsafe Vector CreateSeries(ulong base, ulong step); // svindex_u64 + /// public static unsafe Vector DuplicateSelectedScalarToVector(float value); // svdup[_n]_f32 or svdup[_n]_f32_m or svdup[_n]_f32_x or svdup[_n]_f32_z + /// public static unsafe Vector DuplicateSelectedScalarToVector(double value); // svdup[_n]_f64 or svdup[_n]_f64_m or svdup[_n]_f64_x or svdup[_n]_f64_z + /// public static unsafe Vector DuplicateSelectedScalarToVector(sbyte value); // svdup[_n]_s8 or svdup[_n]_s8_m or svdup[_n]_s8_x or svdup[_n]_s8_z + /// public static unsafe Vector DuplicateSelectedScalarToVector(short value); // svdup[_n]_s16 or svdup[_n]_s16_m or svdup[_n]_s16_x or svdup[_n]_s16_z + /// public static unsafe Vector DuplicateSelectedScalarToVector(int value); // svdup[_n]_s32 or svdup[_n]_s32_m or svdup[_n]_s32_x or svdup[_n]_s32_z + /// public static unsafe Vector DuplicateSelectedScalarToVector(long value); // svdup[_n]_s64 or svdup[_n]_s64_m or svdup[_n]_s64_x or svdup[_n]_s64_z + /// public static unsafe Vector DuplicateSelectedScalarToVector(byte value); // svdup[_n]_u8 or svdup[_n]_u8_m or svdup[_n]_u8_x or svdup[_n]_u8_z + /// public static unsafe Vector DuplicateSelectedScalarToVector(ushort value); // svdup[_n]_u16 or svdup[_n]_u16_m or svdup[_n]_u16_x or svdup[_n]_u16_z + /// public static unsafe Vector DuplicateSelectedScalarToVector(uint value); // svdup[_n]_u32 or svdup[_n]_u32_m or svdup[_n]_u32_x or svdup[_n]_u32_z + /// public static unsafe Vector DuplicateSelectedScalarToVector(ulong value); // svdup[_n]_u64 or svdup[_n]_u64_m or svdup[_n]_u64_x or svdup[_n]_u64_z + /// public static unsafe Vector DuplicateSelectedScalarToVector(bool value); // svdup[_n]_b8 + /// public static unsafe Vector DuplicateSelectedScalarToVector(bool value); // svdup[_n]_b16 + /// public static unsafe Vector DuplicateSelectedScalarToVector(bool value); // svdup[_n]_b32 + /// public static unsafe Vector DuplicateSelectedScalarToVector(bool value); // svdup[_n]_b64 + /// public 
static unsafe Vector Move(Vector value); // svmov[_b]_z + /// public static unsafe Vector Move(Vector value); // svmov[_b]_z + /// public static unsafe Vector Move(Vector value); // svmov[_b]_z + /// public static unsafe Vector Move(Vector value); // svmov[_b]_z + /// public static unsafe Vector Move(Vector value); // svmov[_b]_z + /// public static unsafe Vector Move(Vector value); // svmov[_b]_z + /// public static unsafe Vector Move(Vector value); // svmov[_b]_z + /// public static unsafe Vector Move(Vector value); // svmov[_b]_z + /// Total Rejected: 30 + + /// Total ACLE covered across API: 258 + diff --git a/sve_api/out_api/apiraw_FEAT_SVE__bitwise.cs b/sve_api/out_api/apiraw_FEAT_SVE__bitwise.cs new file mode 100644 index 0000000000000..32cf16a0a9eff --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE__bitwise.cs @@ -0,0 +1,800 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: bitwise +{ + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector And(Vector left, Vector right); // AND // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AndAcross(Vector value); // ANDV // predicated + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AndNot(Vector left, Vector right); // NAND // predicated + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector BitwiseClear(Vector left, Vector right); // BIC // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector BooleanNot(Vector value); // CNOT // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector InsertIntoShiftedVector(Vector left, T right); // INSR + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Not(Vector value); // NOT or EOR // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Or(Vector left, Vector right); // ORR // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector OrAcross(Vector value); // ORV // predicated + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector OrNot(Vector left, Vector right); // NOR or ORN // predicated + + /// T: [sbyte, byte], [short, ushort], [int, uint], [long, ulong], [sbyte, ulong], [short, ulong], [int, ulong], [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); // LSL or LSLR // predicated, MOVPRFX + + /// T: byte, ushort, uint, ulong + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); // LSL or LSLR // predicated, MOVPRFX + + /// T: [sbyte, byte], [short, ushort], [int, uint], [long, ulong], [sbyte, ulong], [short, ulong], [int, ulong] + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right); // ASR or ASRR // predicated, MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftRightArithmeticForDivide(Vector value, [ConstantExpected] byte control); // ASRD // predicated, MOVPRFX + + /// T: byte, ushort, uint, ulong + public static unsafe Vector ShiftRightLogical(Vector left, Vector right); // LSR or LSRR // predicated, MOVPRFX + + /// T: [byte, ulong], [ushort, ulong], [uint, ulong] + 
+
+
+/// Full API
+public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: bitwise
+{
+ /// And : Bitwise AND
+
+ /// svint8_t svand[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "AND Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; AND Zresult.B, Pg/M, Zresult.B, Zop2.B"
+ /// svint8_t svand[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "AND Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "AND Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "AND Zresult.D, Zop1.D, Zop2.D"
+ /// svint8_t svand[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; AND Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; AND Zresult.B, Pg/M, Zresult.B, Zop1.B"
+ /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "AND Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector<sbyte> And(Vector<sbyte> left, Vector<sbyte> right);
+
+ /// svint16_t svand[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "AND Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; AND Zresult.H, Pg/M, Zresult.H, Zop2.H"
+ /// svint16_t svand[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "AND Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "AND Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "AND Zresult.D, Zop1.D, Zop2.D"
+ /// svint16_t svand[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; AND Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; AND Zresult.H, Pg/M, Zresult.H, Zop1.H"
+ /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "AND Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector<short> And(Vector<short> left, Vector<short> right);
+
+ /// svint32_t svand[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "AND Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; AND Zresult.S, Pg/M, Zresult.S, Zop2.S"
+ /// svint32_t svand[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "AND Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "AND Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "AND Zresult.D, Zop1.D, Zop2.D"
+ /// svint32_t svand[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; AND Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; AND Zresult.S, Pg/M, Zresult.S, Zop1.S"
+ /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "AND Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector<int> And(Vector<int> left, Vector<int> right);
+
+ /// svint64_t svand[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "AND Ztied1.D, Pg/M, Ztied1.D,
Zop2.D" or "MOVPRFX Zresult, Zop1; AND Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svand[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "AND Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "AND Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "AND Zresult.D, Zop1.D, Zop2.D" + /// svint64_t svand[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; AND Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; AND Zresult.D, Pg/M, Zresult.D, Zop1.D" + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "AND Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector And(Vector left, Vector right); + + /// svuint8_t svand[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "AND Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; AND Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svand[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "AND Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "AND Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "AND Zresult.D, Zop1.D, Zop2.D" + /// svuint8_t svand[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; AND Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; AND Zresult.B, Pg/M, Zresult.B, Zop1.B" + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "AND Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector And(Vector left, Vector right); + + /// svuint16_t svand[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "AND Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; AND Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svand[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "AND Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "AND Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "AND Zresult.D, Zop1.D, Zop2.D" + /// svuint16_t svand[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; AND Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; AND Zresult.H, Pg/M, Zresult.H, Zop1.H" + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "AND Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector And(Vector left, Vector right); + + /// svuint32_t svand[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "AND Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; AND Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svand[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "AND Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "AND Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "AND Zresult.D, Zop1.D, Zop2.D" + /// svuint32_t svand[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; AND Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; AND Zresult.S, Pg/M, Zresult.S, Zop1.S" + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "AND Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector And(Vector left, Vector right); + + /// svuint64_t svand[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "AND Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; AND Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svand[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "AND Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "AND Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "AND Zresult.D, Zop1.D, Zop2.D" + /// svuint64_t svand[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; AND Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, 
Zop2.D; AND Zresult.D, Pg/M, Zresult.D, Zop1.D" + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "AND Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector And(Vector left, Vector right); + + + /// AndAcross : Bitwise AND reduction to scalar + + /// int8_t svandv[_s8](svbool_t pg, svint8_t op) : "ANDV Bresult, Pg, Zop.B" + public static unsafe Vector AndAcross(Vector value); + + /// int16_t svandv[_s16](svbool_t pg, svint16_t op) : "ANDV Hresult, Pg, Zop.H" + public static unsafe Vector AndAcross(Vector value); + + /// int32_t svandv[_s32](svbool_t pg, svint32_t op) : "ANDV Sresult, Pg, Zop.S" + public static unsafe Vector AndAcross(Vector value); + + /// int64_t svandv[_s64](svbool_t pg, svint64_t op) : "ANDV Dresult, Pg, Zop.D" + public static unsafe Vector AndAcross(Vector value); + + /// uint8_t svandv[_u8](svbool_t pg, svuint8_t op) : "ANDV Bresult, Pg, Zop.B" + public static unsafe Vector AndAcross(Vector value); + + /// uint16_t svandv[_u16](svbool_t pg, svuint16_t op) : "ANDV Hresult, Pg, Zop.H" + public static unsafe Vector AndAcross(Vector value); + + /// uint32_t svandv[_u32](svbool_t pg, svuint32_t op) : "ANDV Sresult, Pg, Zop.S" + public static unsafe Vector AndAcross(Vector value); + + /// uint64_t svandv[_u64](svbool_t pg, svuint64_t op) : "ANDV Dresult, Pg, Zop.D" + public static unsafe Vector AndAcross(Vector value); + + + /// AndNot : Bitwise NAND + + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NAND Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector AndNot(Vector left, Vector right); + + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NAND Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector AndNot(Vector left, Vector right); + + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NAND Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector AndNot(Vector left, Vector right); + + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NAND Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector AndNot(Vector left, Vector right); + + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NAND Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector AndNot(Vector left, Vector right); + + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NAND Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector AndNot(Vector left, Vector right); + + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NAND Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector AndNot(Vector left, Vector right); + + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NAND Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector AndNot(Vector left, Vector right); + + + /// BitwiseClear : Bitwise clear + + /// svint8_t svbic[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "BIC Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; BIC Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svbic[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "BIC Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "BIC Zresult.D, Zop1.D, Zop2.D" + /// svint8_t svbic[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; BIC Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BIC Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector BitwiseClear(Vector left, Vector right); + + /// svint16_t 
svbic[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "BIC Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; BIC Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svbic[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "BIC Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "BIC Zresult.D, Zop1.D, Zop2.D" + /// svint16_t svbic[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; BIC Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BIC Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector BitwiseClear(Vector left, Vector right); + + /// svint32_t svbic[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "BIC Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; BIC Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svbic[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "BIC Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "BIC Zresult.D, Zop1.D, Zop2.D" + /// svint32_t svbic[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; BIC Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BIC Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector BitwiseClear(Vector left, Vector right); + + /// svint64_t svbic[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "BIC Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; BIC Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svbic[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "BIC Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "BIC Zresult.D, Zop1.D, Zop2.D" + /// svint64_t svbic[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; BIC Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BIC Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector BitwiseClear(Vector left, Vector right); + + /// svuint8_t svbic[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "BIC Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; BIC Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svbic[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "BIC Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "BIC Zresult.D, Zop1.D, Zop2.D" + /// svuint8_t svbic[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; BIC Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BIC Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector BitwiseClear(Vector left, Vector right); + + /// svuint16_t svbic[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "BIC Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; BIC Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svbic[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "BIC Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "BIC Zresult.D, Zop1.D, Zop2.D" + /// svuint16_t svbic[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; BIC Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BIC Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector BitwiseClear(Vector left, Vector right); + + /// svuint32_t svbic[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "BIC Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; BIC Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svbic[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : 
"BIC Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "BIC Zresult.D, Zop1.D, Zop2.D" + /// svuint32_t svbic[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; BIC Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BIC Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector BitwiseClear(Vector left, Vector right); + + /// svuint64_t svbic[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "BIC Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; BIC Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svbic[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "BIC Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "BIC Zresult.D, Zop1.D, Zop2.D" + /// svuint64_t svbic[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; BIC Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BIC Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector BitwiseClear(Vector left, Vector right); + + + /// BooleanNot : Logically invert boolean condition + + /// svint8_t svcnot[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) : "CNOT Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; CNOT Zresult.B, Pg/M, Zop.B" + /// svint8_t svcnot[_s8]_x(svbool_t pg, svint8_t op) : "CNOT Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; CNOT Zresult.B, Pg/M, Zop.B" + /// svint8_t svcnot[_s8]_z(svbool_t pg, svint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; CNOT Zresult.B, Pg/M, Zop.B" + public static unsafe Vector BooleanNot(Vector value); + + /// svint16_t svcnot[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) : "CNOT Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; CNOT Zresult.H, Pg/M, Zop.H" + /// svint16_t svcnot[_s16]_x(svbool_t pg, svint16_t op) : "CNOT Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; CNOT Zresult.H, Pg/M, Zop.H" + /// svint16_t svcnot[_s16]_z(svbool_t pg, svint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; CNOT Zresult.H, Pg/M, Zop.H" + public static unsafe Vector BooleanNot(Vector value); + + /// svint32_t svcnot[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) : "CNOT Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; CNOT Zresult.S, Pg/M, Zop.S" + /// svint32_t svcnot[_s32]_x(svbool_t pg, svint32_t op) : "CNOT Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; CNOT Zresult.S, Pg/M, Zop.S" + /// svint32_t svcnot[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; CNOT Zresult.S, Pg/M, Zop.S" + public static unsafe Vector BooleanNot(Vector value); + + /// svint64_t svcnot[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) : "CNOT Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; CNOT Zresult.D, Pg/M, Zop.D" + /// svint64_t svcnot[_s64]_x(svbool_t pg, svint64_t op) : "CNOT Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; CNOT Zresult.D, Pg/M, Zop.D" + /// svint64_t svcnot[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; CNOT Zresult.D, Pg/M, Zop.D" + public static unsafe Vector BooleanNot(Vector value); + + /// svuint8_t svcnot[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op) : "CNOT Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; CNOT Zresult.B, Pg/M, Zop.B" + /// svuint8_t svcnot[_u8]_x(svbool_t pg, svuint8_t op) : "CNOT Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; CNOT Zresult.B, Pg/M, Zop.B" + /// svuint8_t svcnot[_u8]_z(svbool_t pg, svuint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; CNOT Zresult.B, Pg/M, Zop.B" + public static 
unsafe Vector BooleanNot(Vector value); + + /// svuint16_t svcnot[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) : "CNOT Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; CNOT Zresult.H, Pg/M, Zop.H" + /// svuint16_t svcnot[_u16]_x(svbool_t pg, svuint16_t op) : "CNOT Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; CNOT Zresult.H, Pg/M, Zop.H" + /// svuint16_t svcnot[_u16]_z(svbool_t pg, svuint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; CNOT Zresult.H, Pg/M, Zop.H" + public static unsafe Vector BooleanNot(Vector value); + + /// svuint32_t svcnot[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) : "CNOT Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; CNOT Zresult.S, Pg/M, Zop.S" + /// svuint32_t svcnot[_u32]_x(svbool_t pg, svuint32_t op) : "CNOT Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; CNOT Zresult.S, Pg/M, Zop.S" + /// svuint32_t svcnot[_u32]_z(svbool_t pg, svuint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; CNOT Zresult.S, Pg/M, Zop.S" + public static unsafe Vector BooleanNot(Vector value); + + /// svuint64_t svcnot[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) : "CNOT Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; CNOT Zresult.D, Pg/M, Zop.D" + /// svuint64_t svcnot[_u64]_x(svbool_t pg, svuint64_t op) : "CNOT Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; CNOT Zresult.D, Pg/M, Zop.D" + /// svuint64_t svcnot[_u64]_z(svbool_t pg, svuint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; CNOT Zresult.D, Pg/M, Zop.D" + public static unsafe Vector BooleanNot(Vector value); + + + /// InsertIntoShiftedVector : Insert scalar into shifted vector + + /// svfloat32_t svinsr[_n_f32](svfloat32_t op1, float32_t op2) : "INSR Ztied1.S, Wop2" or "INSR Ztied1.S, Sop2" + public static unsafe Vector InsertIntoShiftedVector(Vector left, float right); + + /// svfloat64_t svinsr[_n_f64](svfloat64_t op1, float64_t op2) : "INSR Ztied1.D, Xop2" or "INSR Ztied1.D, Dop2" + public static unsafe Vector InsertIntoShiftedVector(Vector left, double right); + + /// svint8_t svinsr[_n_s8](svint8_t op1, int8_t op2) : "INSR Ztied1.B, Wop2" or "INSR Ztied1.B, Bop2" + public static unsafe Vector InsertIntoShiftedVector(Vector left, sbyte right); + + /// svint16_t svinsr[_n_s16](svint16_t op1, int16_t op2) : "INSR Ztied1.H, Wop2" or "INSR Ztied1.H, Hop2" + public static unsafe Vector InsertIntoShiftedVector(Vector left, short right); + + /// svint32_t svinsr[_n_s32](svint32_t op1, int32_t op2) : "INSR Ztied1.S, Wop2" or "INSR Ztied1.S, Sop2" + public static unsafe Vector InsertIntoShiftedVector(Vector left, int right); + + /// svint64_t svinsr[_n_s64](svint64_t op1, int64_t op2) : "INSR Ztied1.D, Xop2" or "INSR Ztied1.D, Dop2" + public static unsafe Vector InsertIntoShiftedVector(Vector left, long right); + + /// svuint8_t svinsr[_n_u8](svuint8_t op1, uint8_t op2) : "INSR Ztied1.B, Wop2" or "INSR Ztied1.B, Bop2" + public static unsafe Vector InsertIntoShiftedVector(Vector left, byte right); + + /// svuint16_t svinsr[_n_u16](svuint16_t op1, uint16_t op2) : "INSR Ztied1.H, Wop2" or "INSR Ztied1.H, Hop2" + public static unsafe Vector InsertIntoShiftedVector(Vector left, ushort right); + + /// svuint32_t svinsr[_n_u32](svuint32_t op1, uint32_t op2) : "INSR Ztied1.S, Wop2" or "INSR Ztied1.S, Sop2" + public static unsafe Vector InsertIntoShiftedVector(Vector left, uint right); + + /// svuint64_t svinsr[_n_u64](svuint64_t op1, uint64_t op2) : "INSR Ztied1.D, Xop2" or "INSR Ztied1.D, Dop2" + public static unsafe Vector InsertIntoShiftedVector(Vector left, ulong right); + + + /// Not : 
Bitwise invert + + /// svint8_t svnot[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) : "NOT Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; NOT Zresult.B, Pg/M, Zop.B" + /// svint8_t svnot[_s8]_x(svbool_t pg, svint8_t op) : "NOT Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; NOT Zresult.B, Pg/M, Zop.B" + /// svint8_t svnot[_s8]_z(svbool_t pg, svint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; NOT Zresult.B, Pg/M, Zop.B" + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) : "EOR Presult.B, Pg/Z, Pop.B, Pg.B" + public static unsafe Vector Not(Vector value); + + /// svint16_t svnot[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) : "NOT Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; NOT Zresult.H, Pg/M, Zop.H" + /// svint16_t svnot[_s16]_x(svbool_t pg, svint16_t op) : "NOT Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; NOT Zresult.H, Pg/M, Zop.H" + /// svint16_t svnot[_s16]_z(svbool_t pg, svint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; NOT Zresult.H, Pg/M, Zop.H" + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) : "EOR Presult.B, Pg/Z, Pop.B, Pg.B" + public static unsafe Vector Not(Vector value); + + /// svint32_t svnot[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) : "NOT Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; NOT Zresult.S, Pg/M, Zop.S" + /// svint32_t svnot[_s32]_x(svbool_t pg, svint32_t op) : "NOT Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; NOT Zresult.S, Pg/M, Zop.S" + /// svint32_t svnot[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; NOT Zresult.S, Pg/M, Zop.S" + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) : "EOR Presult.B, Pg/Z, Pop.B, Pg.B" + public static unsafe Vector Not(Vector value); + + /// svint64_t svnot[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) : "NOT Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; NOT Zresult.D, Pg/M, Zop.D" + /// svint64_t svnot[_s64]_x(svbool_t pg, svint64_t op) : "NOT Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; NOT Zresult.D, Pg/M, Zop.D" + /// svint64_t svnot[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; NOT Zresult.D, Pg/M, Zop.D" + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) : "EOR Presult.B, Pg/Z, Pop.B, Pg.B" + public static unsafe Vector Not(Vector value); + + /// svuint8_t svnot[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op) : "NOT Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; NOT Zresult.B, Pg/M, Zop.B" + /// svuint8_t svnot[_u8]_x(svbool_t pg, svuint8_t op) : "NOT Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; NOT Zresult.B, Pg/M, Zop.B" + /// svuint8_t svnot[_u8]_z(svbool_t pg, svuint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; NOT Zresult.B, Pg/M, Zop.B" + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) : "EOR Presult.B, Pg/Z, Pop.B, Pg.B" + public static unsafe Vector Not(Vector value); + + /// svuint16_t svnot[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) : "NOT Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; NOT Zresult.H, Pg/M, Zop.H" + /// svuint16_t svnot[_u16]_x(svbool_t pg, svuint16_t op) : "NOT Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; NOT Zresult.H, Pg/M, Zop.H" + /// svuint16_t svnot[_u16]_z(svbool_t pg, svuint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; NOT Zresult.H, Pg/M, Zop.H" + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) : "EOR Presult.B, Pg/Z, Pop.B, Pg.B" + public static unsafe Vector Not(Vector value); + + /// svuint32_t svnot[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) : "NOT Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, 
Zinactive; NOT Zresult.S, Pg/M, Zop.S" + /// svuint32_t svnot[_u32]_x(svbool_t pg, svuint32_t op) : "NOT Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; NOT Zresult.S, Pg/M, Zop.S" + /// svuint32_t svnot[_u32]_z(svbool_t pg, svuint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; NOT Zresult.S, Pg/M, Zop.S" + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) : "EOR Presult.B, Pg/Z, Pop.B, Pg.B" + public static unsafe Vector Not(Vector value); + + /// svuint64_t svnot[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) : "NOT Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; NOT Zresult.D, Pg/M, Zop.D" + /// svuint64_t svnot[_u64]_x(svbool_t pg, svuint64_t op) : "NOT Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; NOT Zresult.D, Pg/M, Zop.D" + /// svuint64_t svnot[_u64]_z(svbool_t pg, svuint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; NOT Zresult.D, Pg/M, Zop.D" + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) : "EOR Presult.B, Pg/Z, Pop.B, Pg.B" + public static unsafe Vector Not(Vector value); + + + /// Or : Bitwise inclusive OR + + /// svint8_t svorr[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "ORR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; ORR Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svorr[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "ORR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "ORR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "ORR Zresult.D, Zop1.D, Zop2.D" + /// svint8_t svorr[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; ORR Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; ORR Zresult.B, Pg/M, Zresult.B, Zop1.B" + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORR Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector Or(Vector left, Vector right); + + /// svint16_t svorr[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "ORR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; ORR Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svorr[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "ORR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "ORR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "ORR Zresult.D, Zop1.D, Zop2.D" + /// svint16_t svorr[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; ORR Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; ORR Zresult.H, Pg/M, Zresult.H, Zop1.H" + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORR Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector Or(Vector left, Vector right); + + /// svint32_t svorr[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "ORR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; ORR Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svorr[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "ORR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "ORR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "ORR Zresult.D, Zop1.D, Zop2.D" + /// svint32_t svorr[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; ORR Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; ORR Zresult.S, Pg/M, Zresult.S, Zop1.S" + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORR Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector Or(Vector left, Vector right); + + /// svint64_t svorr[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "ORR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; ORR Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t 
svorr[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "ORR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "ORR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "ORR Zresult.D, Zop1.D, Zop2.D" + /// svint64_t svorr[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; ORR Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; ORR Zresult.D, Pg/M, Zresult.D, Zop1.D" + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORR Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector Or(Vector left, Vector right); + + /// svuint8_t svorr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "ORR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; ORR Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svorr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "ORR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "ORR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "ORR Zresult.D, Zop1.D, Zop2.D" + /// svuint8_t svorr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; ORR Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; ORR Zresult.B, Pg/M, Zresult.B, Zop1.B" + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORR Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector Or(Vector left, Vector right); + + /// svuint16_t svorr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "ORR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; ORR Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svorr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "ORR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "ORR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "ORR Zresult.D, Zop1.D, Zop2.D" + /// svuint16_t svorr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; ORR Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; ORR Zresult.H, Pg/M, Zresult.H, Zop1.H" + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORR Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector Or(Vector left, Vector right); + + /// svuint32_t svorr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "ORR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; ORR Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svorr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "ORR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "ORR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "ORR Zresult.D, Zop1.D, Zop2.D" + /// svuint32_t svorr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; ORR Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; ORR Zresult.S, Pg/M, Zresult.S, Zop1.S" + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORR Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector Or(Vector left, Vector right); + + /// svuint64_t svorr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "ORR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; ORR Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svorr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "ORR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "ORR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "ORR Zresult.D, Zop1.D, Zop2.D" + /// svuint64_t svorr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; ORR Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; ORR Zresult.D, Pg/M, Zresult.D, Zop1.D" + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, 
svbool_t op2) : "ORR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector<ulong> Or(Vector<ulong> left, Vector<ulong> right);
+
+
+ /// OrAcross : Bitwise inclusive OR reduction to scalar
+
+ /// int8_t svorv[_s8](svbool_t pg, svint8_t op) : "ORV Bresult, Pg, Zop.B"
+ public static unsafe Vector<sbyte> OrAcross(Vector<sbyte> value);
+
+ /// int16_t svorv[_s16](svbool_t pg, svint16_t op) : "ORV Hresult, Pg, Zop.H"
+ public static unsafe Vector<short> OrAcross(Vector<short> value);
+
+ /// int32_t svorv[_s32](svbool_t pg, svint32_t op) : "ORV Sresult, Pg, Zop.S"
+ public static unsafe Vector<int> OrAcross(Vector<int> value);
+
+ /// int64_t svorv[_s64](svbool_t pg, svint64_t op) : "ORV Dresult, Pg, Zop.D"
+ public static unsafe Vector<long> OrAcross(Vector<long> value);
+
+ /// uint8_t svorv[_u8](svbool_t pg, svuint8_t op) : "ORV Bresult, Pg, Zop.B"
+ public static unsafe Vector<byte> OrAcross(Vector<byte> value);
+
+ /// uint16_t svorv[_u16](svbool_t pg, svuint16_t op) : "ORV Hresult, Pg, Zop.H"
+ public static unsafe Vector<ushort> OrAcross(Vector<ushort> value);
+
+ /// uint32_t svorv[_u32](svbool_t pg, svuint32_t op) : "ORV Sresult, Pg, Zop.S"
+ public static unsafe Vector<uint> OrAcross(Vector<uint> value);
+
+ /// uint64_t svorv[_u64](svbool_t pg, svuint64_t op) : "ORV Dresult, Pg, Zop.D"
+ public static unsafe Vector<ulong> OrAcross(Vector<ulong> value);
+
+
+ /// OrNot : Bitwise NOR
+
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORN Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector<sbyte> OrNot(Vector<sbyte> left, Vector<sbyte> right);
+
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORN Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector<short> OrNot(Vector<short> left, Vector<short> right);
+
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORN Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector<int> OrNot(Vector<int> left, Vector<int> right);
+
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORN Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector<long> OrNot(Vector<long> left, Vector<long> right);
+
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORN Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector<byte> OrNot(Vector<byte> left, Vector<byte> right);
+
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORN Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector<ushort> OrNot(Vector<ushort> left, Vector<ushort> right);
+
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORN Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector<uint> OrNot(Vector<uint> left, Vector<uint> right);
+
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "NOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "ORN Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector<ulong> OrNot(Vector<ulong> left, Vector<ulong> right);
+
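+
+/// Usage sketch (illustrative only; not part of the generated surface):
+/// OrAcross folds the active lanes with ORV into a single scalar; assuming the
+/// reduction lands in element 0, as the "ORV Sresult, Pg, Zop.S" form
+/// suggests, an "any bit set" test over a Vector<uint> of flags could read:
+///
+///   bool anySet = Sve.OrAcross(flags)[0] != 0;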
+ + /// ShiftLeftLogical : Logical shift left + + /// svint8_t svlsl[_s8]_m(svbool_t pg, svint8_t op1, svuint8_t op2) : "LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; LSL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svlsl[_s8]_x(svbool_t pg, svint8_t op1, svuint8_t op2) : "LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "LSLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; LSL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svlsl[_s8]_z(svbool_t pg, svint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; LSL Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; LSLR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); + + /// svint16_t svlsl[_s16]_m(svbool_t pg, svint16_t op1, svuint16_t op2) : "LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; LSL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svlsl[_s16]_x(svbool_t pg, svint16_t op1, svuint16_t op2) : "LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "LSLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; LSL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svlsl[_s16]_z(svbool_t pg, svint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; LSL Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; LSLR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); + + /// svint32_t svlsl[_s32]_m(svbool_t pg, svint32_t op1, svuint32_t op2) : "LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; LSL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svlsl[_s32]_x(svbool_t pg, svint32_t op1, svuint32_t op2) : "LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "LSLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; LSL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svlsl[_s32]_z(svbool_t pg, svint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; LSL Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; LSLR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); + + /// svint64_t svlsl[_s64]_m(svbool_t pg, svint64_t op1, svuint64_t op2) : "LSL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; LSL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svlsl[_s64]_x(svbool_t pg, svint64_t op1, svuint64_t op2) : "LSL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "LSLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; LSL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svlsl[_s64]_z(svbool_t pg, svint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; LSL Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; LSLR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); + + /// svuint8_t svlsl[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; LSL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svlsl[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "LSLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; LSL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svlsl[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; LSL Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; LSLR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector 
ShiftLeftLogical(Vector left, Vector right); + + /// svuint16_t svlsl[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; LSL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svlsl[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "LSLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; LSL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svlsl[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; LSL Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; LSLR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); + + /// svuint32_t svlsl[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; LSL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svlsl[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "LSLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; LSL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svlsl[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; LSL Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; LSLR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); + + /// svuint64_t svlsl[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "LSL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; LSL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svlsl[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "LSL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "LSLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; LSL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svlsl[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; LSL Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; LSLR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); + + /// svint8_t svlsl_wide[_s8]_m(svbool_t pg, svint8_t op1, svuint64_t op2) : "LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.D" or "MOVPRFX Zresult, Zop1; LSL Zresult.B, Pg/M, Zresult.B, Zop2.D" + /// svint8_t svlsl_wide[_s8]_x(svbool_t pg, svint8_t op1, svuint64_t op2) : "LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.D" or "LSL Zresult.B, Zop1.B, Zop2.D" + /// svint8_t svlsl_wide[_s8]_z(svbool_t pg, svint8_t op1, svuint64_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; LSL Zresult.B, Pg/M, Zresult.B, Zop2.D" + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); + + /// svint16_t svlsl_wide[_s16]_m(svbool_t pg, svint16_t op1, svuint64_t op2) : "LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.D" or "MOVPRFX Zresult, Zop1; LSL Zresult.H, Pg/M, Zresult.H, Zop2.D" + /// svint16_t svlsl_wide[_s16]_x(svbool_t pg, svint16_t op1, svuint64_t op2) : "LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.D" or "LSL Zresult.H, Zop1.H, Zop2.D" + /// svint16_t svlsl_wide[_s16]_z(svbool_t pg, svint16_t op1, svuint64_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; LSL Zresult.H, Pg/M, Zresult.H, Zop2.D" + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); + + /// svint32_t svlsl_wide[_s32]_m(svbool_t pg, svint32_t op1, svuint64_t op2) : "LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.D" or "MOVPRFX Zresult, Zop1; LSL Zresult.S, Pg/M, Zresult.S, Zop2.D" + /// svint32_t 
svlsl_wide[_s32]_x(svbool_t pg, svint32_t op1, svuint64_t op2) : "LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.D" or "LSL Zresult.S, Zop1.S, Zop2.D" + /// svint32_t svlsl_wide[_s32]_z(svbool_t pg, svint32_t op1, svuint64_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; LSL Zresult.S, Pg/M, Zresult.S, Zop2.D" + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); + + /// svuint8_t svlsl_wide[_u8]_m(svbool_t pg, svuint8_t op1, svuint64_t op2) : "LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.D" or "MOVPRFX Zresult, Zop1; LSL Zresult.B, Pg/M, Zresult.B, Zop2.D" + /// svuint8_t svlsl_wide[_u8]_x(svbool_t pg, svuint8_t op1, svuint64_t op2) : "LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.D" or "LSL Zresult.B, Zop1.B, Zop2.D" + /// svuint8_t svlsl_wide[_u8]_z(svbool_t pg, svuint8_t op1, svuint64_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; LSL Zresult.B, Pg/M, Zresult.B, Zop2.D" + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); + + /// svuint16_t svlsl_wide[_u16]_m(svbool_t pg, svuint16_t op1, svuint64_t op2) : "LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.D" or "MOVPRFX Zresult, Zop1; LSL Zresult.H, Pg/M, Zresult.H, Zop2.D" + /// svuint16_t svlsl_wide[_u16]_x(svbool_t pg, svuint16_t op1, svuint64_t op2) : "LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.D" or "LSL Zresult.H, Zop1.H, Zop2.D" + /// svuint16_t svlsl_wide[_u16]_z(svbool_t pg, svuint16_t op1, svuint64_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; LSL Zresult.H, Pg/M, Zresult.H, Zop2.D" + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); + + /// svuint32_t svlsl_wide[_u32]_m(svbool_t pg, svuint32_t op1, svuint64_t op2) : "LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.D" or "MOVPRFX Zresult, Zop1; LSL Zresult.S, Pg/M, Zresult.S, Zop2.D" + /// svuint32_t svlsl_wide[_u32]_x(svbool_t pg, svuint32_t op1, svuint64_t op2) : "LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.D" or "LSL Zresult.S, Zop1.S, Zop2.D" + /// svuint32_t svlsl_wide[_u32]_z(svbool_t pg, svuint32_t op1, svuint64_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; LSL Zresult.S, Pg/M, Zresult.S, Zop2.D" + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); + + + /// ShiftRightArithmetic : Arithmetic shift right + + /// svint8_t svasr[_s8]_m(svbool_t pg, svint8_t op1, svuint8_t op2) : "ASR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; ASR Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svasr[_s8]_x(svbool_t pg, svint8_t op1, svuint8_t op2) : "ASR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "ASRR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; ASR Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svasr[_s8]_z(svbool_t pg, svint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; ASR Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; ASRR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right); + + /// svint16_t svasr[_s16]_m(svbool_t pg, svint16_t op1, svuint16_t op2) : "ASR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; ASR Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svasr[_s16]_x(svbool_t pg, svint16_t op1, svuint16_t op2) : "ASR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "ASRR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; ASR Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svasr[_s16]_z(svbool_t pg, svint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; ASR Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; ASRR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector ShiftRightArithmetic(Vector 
left, Vector right);
+
+ /// svint32_t svasr[_s32]_m(svbool_t pg, svint32_t op1, svuint32_t op2) : "ASR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; ASR Zresult.S, Pg/M, Zresult.S, Zop2.S"
+ /// svint32_t svasr[_s32]_x(svbool_t pg, svint32_t op1, svuint32_t op2) : "ASR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "ASRR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; ASR Zresult.S, Pg/M, Zresult.S, Zop2.S"
+ /// svint32_t svasr[_s32]_z(svbool_t pg, svint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; ASR Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; ASRR Zresult.S, Pg/M, Zresult.S, Zop1.S"
+ public static unsafe Vector<int> ShiftRightArithmetic(Vector<int> left, Vector<uint> right);
+
+ /// svint64_t svasr[_s64]_m(svbool_t pg, svint64_t op1, svuint64_t op2) : "ASR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; ASR Zresult.D, Pg/M, Zresult.D, Zop2.D"
+ /// svint64_t svasr[_s64]_x(svbool_t pg, svint64_t op1, svuint64_t op2) : "ASR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "ASRR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; ASR Zresult.D, Pg/M, Zresult.D, Zop2.D"
+ /// svint64_t svasr[_s64]_z(svbool_t pg, svint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; ASR Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; ASRR Zresult.D, Pg/M, Zresult.D, Zop1.D"
+ public static unsafe Vector<long> ShiftRightArithmetic(Vector<long> left, Vector<ulong> right);
+
+ /// svint8_t svasr_wide[_s8]_m(svbool_t pg, svint8_t op1, svuint64_t op2) : "ASR Ztied1.B, Pg/M, Ztied1.B, Zop2.D" or "MOVPRFX Zresult, Zop1; ASR Zresult.B, Pg/M, Zresult.B, Zop2.D"
+ /// svint8_t svasr_wide[_s8]_x(svbool_t pg, svint8_t op1, svuint64_t op2) : "ASR Ztied1.B, Pg/M, Ztied1.B, Zop2.D" or "ASR Zresult.B, Zop1.B, Zop2.D"
+ /// svint8_t svasr_wide[_s8]_z(svbool_t pg, svint8_t op1, svuint64_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; ASR Zresult.B, Pg/M, Zresult.B, Zop2.D"
+ public static unsafe Vector<sbyte> ShiftRightArithmetic(Vector<sbyte> left, Vector<ulong> right);
+
+ /// svint16_t svasr_wide[_s16]_m(svbool_t pg, svint16_t op1, svuint64_t op2) : "ASR Ztied1.H, Pg/M, Ztied1.H, Zop2.D" or "MOVPRFX Zresult, Zop1; ASR Zresult.H, Pg/M, Zresult.H, Zop2.D"
+ /// svint16_t svasr_wide[_s16]_x(svbool_t pg, svint16_t op1, svuint64_t op2) : "ASR Ztied1.H, Pg/M, Ztied1.H, Zop2.D" or "ASR Zresult.H, Zop1.H, Zop2.D"
+ /// svint16_t svasr_wide[_s16]_z(svbool_t pg, svint16_t op1, svuint64_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; ASR Zresult.H, Pg/M, Zresult.H, Zop2.D"
+ public static unsafe Vector<short> ShiftRightArithmetic(Vector<short> left, Vector<ulong> right);
+
+ /// svint32_t svasr_wide[_s32]_m(svbool_t pg, svint32_t op1, svuint64_t op2) : "ASR Ztied1.S, Pg/M, Ztied1.S, Zop2.D" or "MOVPRFX Zresult, Zop1; ASR Zresult.S, Pg/M, Zresult.S, Zop2.D"
+ /// svint32_t svasr_wide[_s32]_x(svbool_t pg, svint32_t op1, svuint64_t op2) : "ASR Ztied1.S, Pg/M, Ztied1.S, Zop2.D" or "ASR Zresult.S, Zop1.S, Zop2.D"
+ /// svint32_t svasr_wide[_s32]_z(svbool_t pg, svint32_t op1, svuint64_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; ASR Zresult.S, Pg/M, Zresult.S, Zop2.D"
+ public static unsafe Vector<int> ShiftRightArithmetic(Vector<int> left, Vector<ulong> right);
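+
+/// Usage sketch (illustrative only; not part of the generated surface): the
+/// per-lane forms take their shift counts as an unsigned vector of the same
+/// width, while the wide forms reuse one Vector<ulong> of counts per 64-bit
+/// granule (the "Zop2.D" operand above). A sign-preserving per-lane
+/// scale-down of Vector<int> values might read:
+///
+///   Vector<int> scaled = Sve.ShiftRightArithmetic(values, counts); // ASR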
+
+
+ /// ShiftRightArithmeticForDivide : Arithmetic shift right for divide by immediate
+
+ /// svint8_t svasrd[_n_s8]_m(svbool_t pg, svint8_t op1, uint64_t imm2) : "ASRD Ztied1.B, Pg/M, Ztied1.B, #imm2" or "MOVPRFX Zresult, Zop1; ASRD Zresult.B, Pg/M, Zresult.B, #imm2"
+ /// svint8_t svasrd[_n_s8]_x(svbool_t pg, svint8_t op1, uint64_t imm2) : "ASRD Ztied1.B, Pg/M, Ztied1.B, #imm2" or "MOVPRFX Zresult, Zop1; ASRD Zresult.B, Pg/M, Zresult.B, #imm2"
+ /// svint8_t svasrd[_n_s8]_z(svbool_t pg, svint8_t op1, uint64_t imm2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; ASRD Zresult.B, Pg/M, Zresult.B, #imm2"
+ public static unsafe Vector<sbyte> ShiftRightArithmeticForDivide(Vector<sbyte> value, [ConstantExpected] byte control);
+
+ /// svint16_t svasrd[_n_s16]_m(svbool_t pg, svint16_t op1, uint64_t imm2) : "ASRD Ztied1.H, Pg/M, Ztied1.H, #imm2" or "MOVPRFX Zresult, Zop1; ASRD Zresult.H, Pg/M, Zresult.H, #imm2"
+ /// svint16_t svasrd[_n_s16]_x(svbool_t pg, svint16_t op1, uint64_t imm2) : "ASRD Ztied1.H, Pg/M, Ztied1.H, #imm2" or "MOVPRFX Zresult, Zop1; ASRD Zresult.H, Pg/M, Zresult.H, #imm2"
+ /// svint16_t svasrd[_n_s16]_z(svbool_t pg, svint16_t op1, uint64_t imm2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; ASRD Zresult.H, Pg/M, Zresult.H, #imm2"
+ public static unsafe Vector<short> ShiftRightArithmeticForDivide(Vector<short> value, [ConstantExpected] byte control);
+
+ /// svint32_t svasrd[_n_s32]_m(svbool_t pg, svint32_t op1, uint64_t imm2) : "ASRD Ztied1.S, Pg/M, Ztied1.S, #imm2" or "MOVPRFX Zresult, Zop1; ASRD Zresult.S, Pg/M, Zresult.S, #imm2"
+ /// svint32_t svasrd[_n_s32]_x(svbool_t pg, svint32_t op1, uint64_t imm2) : "ASRD Ztied1.S, Pg/M, Ztied1.S, #imm2" or "MOVPRFX Zresult, Zop1; ASRD Zresult.S, Pg/M, Zresult.S, #imm2"
+ /// svint32_t svasrd[_n_s32]_z(svbool_t pg, svint32_t op1, uint64_t imm2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; ASRD Zresult.S, Pg/M, Zresult.S, #imm2"
+ public static unsafe Vector<int> ShiftRightArithmeticForDivide(Vector<int> value, [ConstantExpected] byte control);
+
+ /// svint64_t svasrd[_n_s64]_m(svbool_t pg, svint64_t op1, uint64_t imm2) : "ASRD Ztied1.D, Pg/M, Ztied1.D, #imm2" or "MOVPRFX Zresult, Zop1; ASRD Zresult.D, Pg/M, Zresult.D, #imm2"
+ /// svint64_t svasrd[_n_s64]_x(svbool_t pg, svint64_t op1, uint64_t imm2) : "ASRD Ztied1.D, Pg/M, Ztied1.D, #imm2" or "MOVPRFX Zresult, Zop1; ASRD Zresult.D, Pg/M, Zresult.D, #imm2"
+ /// svint64_t svasrd[_n_s64]_z(svbool_t pg, svint64_t op1, uint64_t imm2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; ASRD Zresult.D, Pg/M, Zresult.D, #imm2"
+ public static unsafe Vector<long> ShiftRightArithmeticForDivide(Vector<long> value, [ConstantExpected] byte control);
+
+
+ /// ShiftRightLogical : Logical shift right
+
+ /// svuint8_t svlsr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "LSR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; LSR Zresult.B, Pg/M, Zresult.B, Zop2.B"
+ /// svuint8_t svlsr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "LSR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "LSRR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; LSR Zresult.B, Pg/M, Zresult.B, Zop2.B"
+ /// svuint8_t svlsr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; LSR Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; LSRR Zresult.B, Pg/M, Zresult.B, Zop1.B"
+ public static unsafe Vector<byte> ShiftRightLogical(Vector<byte> left, Vector<byte> right);
+
+ /// svuint16_t svlsr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "LSR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; LSR Zresult.H, Pg/M, Zresult.H, Zop2.H"
+ /// svuint16_t svlsr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "LSR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "LSRR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; LSR Zresult.H, Pg/M, Zresult.H, Zop2.H"
+ /// svuint16_t svlsr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; LSR Zresult.H, Pg/M, Zresult.H, Zop2.H"
or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; LSRR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector ShiftRightLogical(Vector left, Vector right); + + /// svuint32_t svlsr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "LSR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; LSR Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svlsr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "LSR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "LSRR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; LSR Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svlsr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; LSR Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; LSRR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector ShiftRightLogical(Vector left, Vector right); + + /// svuint64_t svlsr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "LSR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; LSR Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svlsr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "LSR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "LSRR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; LSR Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svlsr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; LSR Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; LSRR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector ShiftRightLogical(Vector left, Vector right); + + /// svuint8_t svlsr_wide[_u8]_m(svbool_t pg, svuint8_t op1, svuint64_t op2) : "LSR Ztied1.B, Pg/M, Ztied1.B, Zop2.D" or "MOVPRFX Zresult, Zop1; LSR Zresult.B, Pg/M, Zresult.B, Zop2.D" + /// svuint8_t svlsr_wide[_u8]_x(svbool_t pg, svuint8_t op1, svuint64_t op2) : "LSR Ztied1.B, Pg/M, Ztied1.B, Zop2.D" or "LSR Zresult.B, Zop1.B, Zop2.D" + /// svuint8_t svlsr_wide[_u8]_z(svbool_t pg, svuint8_t op1, svuint64_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; LSR Zresult.B, Pg/M, Zresult.B, Zop2.D" + public static unsafe Vector ShiftRightLogical(Vector left, Vector right); + + /// svuint16_t svlsr_wide[_u16]_m(svbool_t pg, svuint16_t op1, svuint64_t op2) : "LSR Ztied1.H, Pg/M, Ztied1.H, Zop2.D" or "MOVPRFX Zresult, Zop1; LSR Zresult.H, Pg/M, Zresult.H, Zop2.D" + /// svuint16_t svlsr_wide[_u16]_x(svbool_t pg, svuint16_t op1, svuint64_t op2) : "LSR Ztied1.H, Pg/M, Ztied1.H, Zop2.D" or "LSR Zresult.H, Zop1.H, Zop2.D" + /// svuint16_t svlsr_wide[_u16]_z(svbool_t pg, svuint16_t op1, svuint64_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; LSR Zresult.H, Pg/M, Zresult.H, Zop2.D" + public static unsafe Vector ShiftRightLogical(Vector left, Vector right); + + /// svuint32_t svlsr_wide[_u32]_m(svbool_t pg, svuint32_t op1, svuint64_t op2) : "LSR Ztied1.S, Pg/M, Ztied1.S, Zop2.D" or "MOVPRFX Zresult, Zop1; LSR Zresult.S, Pg/M, Zresult.S, Zop2.D" + /// svuint32_t svlsr_wide[_u32]_x(svbool_t pg, svuint32_t op1, svuint64_t op2) : "LSR Ztied1.S, Pg/M, Ztied1.S, Zop2.D" or "LSR Zresult.S, Zop1.S, Zop2.D" + /// svuint32_t svlsr_wide[_u32]_z(svbool_t pg, svuint32_t op1, svuint64_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; LSR Zresult.S, Pg/M, Zresult.S, Zop2.D" + public static unsafe Vector ShiftRightLogical(Vector left, Vector right); + + + /// Xor : Bitwise exclusive OR + + /// svint8_t sveor[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "EOR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; EOR Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// 
svint8_t sveor[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "EOR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "EOR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "EOR Zresult.D, Zop1.D, Zop2.D"
+    /// svint8_t sveor[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; EOR Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; EOR Zresult.B, Pg/M, Zresult.B, Zop1.B"
+    /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "EOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+    public static unsafe Vector<sbyte> Xor(Vector<sbyte> left, Vector<sbyte> right);
+
+    /// svint16_t sveor[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "EOR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; EOR Zresult.H, Pg/M, Zresult.H, Zop2.H"
+    /// svint16_t sveor[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "EOR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "EOR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "EOR Zresult.D, Zop1.D, Zop2.D"
+    /// svint16_t sveor[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; EOR Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; EOR Zresult.H, Pg/M, Zresult.H, Zop1.H"
+    /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "EOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+    public static unsafe Vector<short> Xor(Vector<short> left, Vector<short> right);
+
+    /// svint32_t sveor[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "EOR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; EOR Zresult.S, Pg/M, Zresult.S, Zop2.S"
+    /// svint32_t sveor[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "EOR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "EOR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "EOR Zresult.D, Zop1.D, Zop2.D"
+    /// svint32_t sveor[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; EOR Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; EOR Zresult.S, Pg/M, Zresult.S, Zop1.S"
+    /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "EOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+    public static unsafe Vector<int> Xor(Vector<int> left, Vector<int> right);
+
+    /// svint64_t sveor[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "EOR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; EOR Zresult.D, Pg/M, Zresult.D, Zop2.D"
+    /// svint64_t sveor[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "EOR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "EOR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "EOR Zresult.D, Zop1.D, Zop2.D"
+    /// svint64_t sveor[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; EOR Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; EOR Zresult.D, Pg/M, Zresult.D, Zop1.D"
+    /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "EOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+    public static unsafe Vector<long> Xor(Vector<long> left, Vector<long> right);
+
+    /// svuint8_t sveor[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "EOR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; EOR Zresult.B, Pg/M, Zresult.B, Zop2.B"
+    /// svuint8_t sveor[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "EOR Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "EOR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "EOR Zresult.D, Zop1.D, Zop2.D"
+    /// svuint8_t sveor[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; EOR Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; EOR Zresult.B, Pg/M, Zresult.B, Zop1.B"
+    /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "EOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+    public static unsafe Vector<byte> Xor(Vector<byte> left, Vector<byte> right);
+
+    /// svuint16_t sveor[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "EOR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; EOR Zresult.H, Pg/M, Zresult.H, Zop2.H"
+    /// svuint16_t sveor[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "EOR Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "EOR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "EOR Zresult.D, Zop1.D, Zop2.D"
+    /// svuint16_t sveor[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; EOR Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; EOR Zresult.H, Pg/M, Zresult.H, Zop1.H"
+    /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "EOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+    public static unsafe Vector<ushort> Xor(Vector<ushort> left, Vector<ushort> right);
+
+    /// svuint32_t sveor[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "EOR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; EOR Zresult.S, Pg/M, Zresult.S, Zop2.S"
+    /// svuint32_t sveor[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "EOR Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "EOR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "EOR Zresult.D, Zop1.D, Zop2.D"
+    /// svuint32_t sveor[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; EOR Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; EOR Zresult.S, Pg/M, Zresult.S, Zop1.S"
+    /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "EOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+    public static unsafe Vector<uint> Xor(Vector<uint> left, Vector<uint> right);
+
+    /// svuint64_t sveor[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "EOR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; EOR Zresult.D, Pg/M, Zresult.D, Zop2.D"
+    /// svuint64_t sveor[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "EOR Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "EOR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "EOR Zresult.D, Zop1.D, Zop2.D"
+    /// svuint64_t sveor[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; EOR Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; EOR Zresult.D, Pg/M, Zresult.D, Zop1.D"
+    /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "EOR Presult.B, Pg/Z, Pop1.B, Pop2.B"
+    public static unsafe Vector<ulong> Xor(Vector<ulong> left, Vector<ulong> right);
+
+
+    /// XorAcross : Bitwise exclusive OR reduction to scalar
+
+    /// int8_t sveorv[_s8](svbool_t pg, svint8_t op) : "EORV Bresult, Pg, Zop.B"
+    public static unsafe Vector<sbyte> XorAcross(Vector<sbyte> value);
+
+    /// int16_t sveorv[_s16](svbool_t pg, svint16_t op) : "EORV Hresult, Pg, Zop.H"
+    public static unsafe Vector<short> XorAcross(Vector<short> value);
+
+    /// int32_t sveorv[_s32](svbool_t pg, svint32_t op) : "EORV Sresult, Pg, Zop.S"
+    public static unsafe Vector<int> XorAcross(Vector<int> value);
+
+    /// int64_t sveorv[_s64](svbool_t pg, svint64_t op) : "EORV Dresult, Pg, Zop.D"
+    public static unsafe Vector<long> XorAcross(Vector<long> value);
+
+    /// uint8_t sveorv[_u8](svbool_t pg, svuint8_t op) : "EORV Bresult, Pg, Zop.B"
+    public static unsafe Vector<byte> XorAcross(Vector<byte> value);
+
+    /// uint16_t sveorv[_u16](svbool_t pg, svuint16_t op) : "EORV Hresult, Pg, Zop.H"
+    public static unsafe Vector<ushort> XorAcross(Vector<ushort> value);
+
+    /// uint32_t sveorv[_u32](svbool_t pg, svuint32_t op) : "EORV Sresult, Pg, Zop.S"
+    public static unsafe Vector<uint> XorAcross(Vector<uint> value);
+
+    /// uint64_t sveorv[_u64](svbool_t pg, svuint64_t op) : "EORV Dresult, Pg, Zop.D"
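+    /// Editorial usage sketch, not generator output; the variables and the
+    /// IsSupported guard are hypothetical. XorAcross XOR-reduces all lanes of its
+    /// argument (EORV): for lanes { 1, 2, 4, 8 } the reduction is 1 ^ 2 ^ 4 ^ 8 = 15.
+    ///     if (Sve.IsSupported)
+    ///     {
+    ///         Vector<ulong> x = Sve.Xor(a, b);    // lanewise a ^ b (EOR)
+    ///         Vector<ulong> r = Sve.XorAcross(x); // XOR-reduction of x's lanes
+    ///     }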
+    public static unsafe Vector<ulong> XorAcross(Vector<ulong> value);
+
+
+    /// total method signatures: 130
+    /// total method names: 16
+}
+
+    /// Optional Entries:
+    /// public static unsafe Vector<sbyte> ShiftRightArithmetic(Vector<sbyte> left, byte right); // svasr[_n_s8]_m or svasr[_n_s8]_x or svasr[_n_s8]_z
+    /// public static unsafe Vector<short> ShiftRightArithmetic(Vector<short> left, ushort right); // svasr[_n_s16]_m or svasr[_n_s16]_x or svasr[_n_s16]_z
+    /// public static unsafe Vector<int> ShiftRightArithmetic(Vector<int> left, uint right); // svasr[_n_s32]_m or svasr[_n_s32]_x or svasr[_n_s32]_z
+    /// public static unsafe Vector<long> ShiftRightArithmetic(Vector<long> left, ulong right); // svasr[_n_s64]_m or svasr[_n_s64]_x or svasr[_n_s64]_z
+    /// public static unsafe Vector<sbyte> ShiftRightArithmetic(Vector<sbyte> left, ulong right); // svasr_wide[_n_s8]_m or svasr_wide[_n_s8]_x or svasr_wide[_n_s8]_z
+    /// public static unsafe Vector<short> ShiftRightArithmetic(Vector<short> left, ulong right); // svasr_wide[_n_s16]_m or svasr_wide[_n_s16]_x or svasr_wide[_n_s16]_z
+    /// public static unsafe Vector<int> ShiftRightArithmetic(Vector<int> left, ulong right); // svasr_wide[_n_s32]_m or svasr_wide[_n_s32]_x or svasr_wide[_n_s32]_z
+    /// Total Maybe: 7
+
+    /// Rejected:
+    /// public static unsafe Vector<sbyte> And(Vector<sbyte> left, sbyte right); // svand[_n_s8]_m or svand[_n_s8]_x or svand[_n_s8]_z
+    /// public static unsafe Vector<short> And(Vector<short> left, short right); // svand[_n_s16]_m or svand[_n_s16]_x or svand[_n_s16]_z
+    /// public static unsafe Vector<int> And(Vector<int> left, int right); // svand[_n_s32]_m or svand[_n_s32]_x or svand[_n_s32]_z
+    /// public static unsafe Vector<long> And(Vector<long> left, long right); // svand[_n_s64]_m or svand[_n_s64]_x or svand[_n_s64]_z
+    /// public static unsafe Vector<byte> And(Vector<byte> left, byte right); // svand[_n_u8]_m or svand[_n_u8]_x or svand[_n_u8]_z
+    /// public static unsafe Vector<ushort> And(Vector<ushort> left, ushort right); // svand[_n_u16]_m or svand[_n_u16]_x or svand[_n_u16]_z
+    /// public static unsafe Vector<uint> And(Vector<uint> left, uint right); // svand[_n_u32]_m or svand[_n_u32]_x or svand[_n_u32]_z
+    /// public static unsafe Vector<ulong> And(Vector<ulong> left, ulong right); // svand[_n_u64]_m or svand[_n_u64]_x or svand[_n_u64]_z
+    /// public static unsafe Vector<sbyte> BitwiseClear(Vector<sbyte> left, sbyte right); // svbic[_n_s8]_m or svbic[_n_s8]_x or svbic[_n_s8]_z
+    /// public static unsafe Vector<short> BitwiseClear(Vector<short> left, short right); // svbic[_n_s16]_m or svbic[_n_s16]_x or svbic[_n_s16]_z
+    /// public static unsafe Vector<int> BitwiseClear(Vector<int> left, int right); // svbic[_n_s32]_m or svbic[_n_s32]_x or svbic[_n_s32]_z
+    /// public static unsafe Vector<long> BitwiseClear(Vector<long> left, long right); // svbic[_n_s64]_m or svbic[_n_s64]_x or svbic[_n_s64]_z
+    /// public static unsafe Vector<byte> BitwiseClear(Vector<byte> left, byte right); // svbic[_n_u8]_m or svbic[_n_u8]_x or svbic[_n_u8]_z
+    /// public static unsafe Vector<ushort> BitwiseClear(Vector<ushort> left, ushort right); // svbic[_n_u16]_m or svbic[_n_u16]_x or svbic[_n_u16]_z
+    /// public static unsafe Vector<uint> BitwiseClear(Vector<uint> left, uint right); // svbic[_n_u32]_m or svbic[_n_u32]_x or svbic[_n_u32]_z
+    /// public static unsafe Vector<ulong> BitwiseClear(Vector<ulong> left, ulong right); // svbic[_n_u64]_m or svbic[_n_u64]_x or svbic[_n_u64]_z
+    /// public static unsafe Vector<sbyte> Or(Vector<sbyte> left, sbyte right); // svorr[_n_s8]_m or svorr[_n_s8]_x or svorr[_n_s8]_z
+    /// public static unsafe Vector<short> Or(Vector<short> left, short right); // svorr[_n_s16]_m or svorr[_n_s16]_x or svorr[_n_s16]_z
+    /// public static unsafe Vector<int> Or(Vector<int> left, int right); // svorr[_n_s32]_m or svorr[_n_s32]_x or svorr[_n_s32]_z
+    /// public static unsafe Vector<long> Or(Vector<long> left, long right); // svorr[_n_s64]_m or svorr[_n_s64]_x or svorr[_n_s64]_z
+    /// public static unsafe Vector<byte> Or(Vector<byte> left, byte right); // svorr[_n_u8]_m or svorr[_n_u8]_x or svorr[_n_u8]_z
+    /// public static unsafe Vector<ushort> Or(Vector<ushort> left, ushort right); // svorr[_n_u16]_m or svorr[_n_u16]_x or svorr[_n_u16]_z
+    /// public static unsafe Vector<uint> Or(Vector<uint> left, uint right); // svorr[_n_u32]_m or svorr[_n_u32]_x or svorr[_n_u32]_z
+    /// public static unsafe Vector<ulong> Or(Vector<ulong> left, ulong right); // svorr[_n_u64]_m or svorr[_n_u64]_x or svorr[_n_u64]_z
+    /// public static unsafe Vector<sbyte> ShiftLeftLogical(Vector<sbyte> left, byte right); // svlsl[_n_s8]_m or svlsl[_n_s8]_x or svlsl[_n_s8]_z
+    /// public static unsafe Vector<short> ShiftLeftLogical(Vector<short> left, ushort right); // svlsl[_n_s16]_m or svlsl[_n_s16]_x or svlsl[_n_s16]_z
+    /// public static unsafe Vector<int> ShiftLeftLogical(Vector<int> left, uint right); // svlsl[_n_s32]_m or svlsl[_n_s32]_x or svlsl[_n_s32]_z
+    /// public static unsafe Vector<long> ShiftLeftLogical(Vector<long> left, ulong right); // svlsl[_n_s64]_m or svlsl[_n_s64]_x or svlsl[_n_s64]_z
+    /// public static unsafe Vector<byte> ShiftLeftLogical(Vector<byte> left, byte right); // svlsl[_n_u8]_m or svlsl[_n_u8]_x or svlsl[_n_u8]_z
+    /// public static unsafe Vector<ushort> ShiftLeftLogical(Vector<ushort> left, ushort right); // svlsl[_n_u16]_m or svlsl[_n_u16]_x or svlsl[_n_u16]_z
+    /// public static unsafe Vector<uint> ShiftLeftLogical(Vector<uint> left, uint right); // svlsl[_n_u32]_m or svlsl[_n_u32]_x or svlsl[_n_u32]_z
+    /// public static unsafe Vector<ulong> ShiftLeftLogical(Vector<ulong> left, ulong right); // svlsl[_n_u64]_m or svlsl[_n_u64]_x or svlsl[_n_u64]_z
+    /// public static unsafe Vector<sbyte> ShiftLeftLogical(Vector<sbyte> left, ulong right); // svlsl_wide[_n_s8]_m or svlsl_wide[_n_s8]_x or svlsl_wide[_n_s8]_z
+    /// public static unsafe Vector<short> ShiftLeftLogical(Vector<short> left, ulong right); // svlsl_wide[_n_s16]_m or svlsl_wide[_n_s16]_x or svlsl_wide[_n_s16]_z
+    /// public static unsafe Vector<int> ShiftLeftLogical(Vector<int> left, ulong right); // svlsl_wide[_n_s32]_m or svlsl_wide[_n_s32]_x or svlsl_wide[_n_s32]_z
+    /// public static unsafe Vector<byte> ShiftLeftLogical(Vector<byte> left, ulong right); // svlsl_wide[_n_u8]_m or svlsl_wide[_n_u8]_x or svlsl_wide[_n_u8]_z
+    /// public static unsafe Vector<ushort> ShiftLeftLogical(Vector<ushort> left, ulong right); // svlsl_wide[_n_u16]_m or svlsl_wide[_n_u16]_x or svlsl_wide[_n_u16]_z
+    /// public static unsafe Vector<uint> ShiftLeftLogical(Vector<uint> left, ulong right); // svlsl_wide[_n_u32]_m or svlsl_wide[_n_u32]_x or svlsl_wide[_n_u32]_z
+    /// public static unsafe Vector<byte> ShiftRightLogical(Vector<byte> left, byte right); // svlsr[_n_u8]_m or svlsr[_n_u8]_x or svlsr[_n_u8]_z
+    /// public static unsafe Vector<ushort> ShiftRightLogical(Vector<ushort> left, ushort right); // svlsr[_n_u16]_m or svlsr[_n_u16]_x or svlsr[_n_u16]_z
+    /// public static unsafe Vector<uint> ShiftRightLogical(Vector<uint> left, uint right); // svlsr[_n_u32]_m or svlsr[_n_u32]_x or svlsr[_n_u32]_z
+    /// public static unsafe Vector<ulong> ShiftRightLogical(Vector<ulong> left, ulong right); // svlsr[_n_u64]_m or svlsr[_n_u64]_x or svlsr[_n_u64]_z
+    /// public static unsafe Vector<byte> ShiftRightLogical(Vector<byte> left, ulong right); // svlsr_wide[_n_u8]_m or svlsr_wide[_n_u8]_x or svlsr_wide[_n_u8]_z
+    /// public static unsafe Vector<ushort> ShiftRightLogical(Vector<ushort> left, ulong right); // svlsr_wide[_n_u16]_m or svlsr_wide[_n_u16]_x or svlsr_wide[_n_u16]_z
+    /// public static unsafe Vector<uint> ShiftRightLogical(Vector<uint> left, ulong right); // svlsr_wide[_n_u32]_m or svlsr_wide[_n_u32]_x or svlsr_wide[_n_u32]_z
+    /// public static unsafe Vector<sbyte> Xor(Vector<sbyte> left, sbyte right); // sveor[_n_s8]_m or sveor[_n_s8]_x or sveor[_n_s8]_z
+    /// public static unsafe Vector<short> Xor(Vector<short> left, short right); // sveor[_n_s16]_m or sveor[_n_s16]_x or sveor[_n_s16]_z
+    /// public static unsafe Vector<int> Xor(Vector<int> left, int right); // sveor[_n_s32]_m or sveor[_n_s32]_x or sveor[_n_s32]_z
+    /// public static unsafe Vector<long> Xor(Vector<long> left, long right); // sveor[_n_s64]_m or sveor[_n_s64]_x or sveor[_n_s64]_z
+    /// public static unsafe Vector<byte> Xor(Vector<byte> left, byte right); // sveor[_n_u8]_m or sveor[_n_u8]_x or sveor[_n_u8]_z
+    /// public static unsafe Vector<ushort> Xor(Vector<ushort> left, ushort right); // sveor[_n_u16]_m or sveor[_n_u16]_x or sveor[_n_u16]_z
+    /// public static unsafe Vector<uint> Xor(Vector<uint> left, uint right); // sveor[_n_u32]_m or sveor[_n_u32]_x or sveor[_n_u32]_z
+    /// public static unsafe Vector<ulong> Xor(Vector<ulong> left, ulong right); // sveor[_n_u64]_m or sveor[_n_u64]_x or sveor[_n_u64]_z
+    /// Total Rejected: 53
+
+    /// Total ACLE covered across API: 518
+
diff --git a/sve_api/out_api/apiraw_FEAT_SVE__counting.cs b/sve_api/out_api/apiraw_FEAT_SVE__counting.cs
new file mode 100644
index 0000000000000..74f5bfecfb0d4
--- /dev/null
+++ b/sve_api/out_api/apiraw_FEAT_SVE__counting.cs
@@ -0,0 +1,713 @@
+namespace System.Runtime.Intrinsics.Arm;
+
+/// VectorT Summary
+public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: counting
+{
+
+    public static unsafe ulong Count16BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // CNTH
+
+    public static unsafe ulong Count32BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // CNTW
+
+    public static unsafe ulong Count64BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // CNTD
+
+    public static unsafe ulong Count8BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // CNTB
+
+    /// T: byte, sbyte, short, int, long, float, double, ushort, uint, ulong
+    public static unsafe ulong GetActiveElementCount(Vector<T> mask, Vector<T> from); // CNTP
+
+    /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long]
+    public static unsafe Vector<T> LeadingSignCount(Vector<T2> value); // CLS // predicated, MOVPRFX
+
+    /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long]
+    public static unsafe Vector<T> LeadingZeroCount(Vector<T2> value); // CLZ // predicated, MOVPRFX
+
+    /// T: byte, ushort, uint, ulong
+    public static unsafe Vector<T> LeadingZeroCount(Vector<T> value); // CLZ // predicated, MOVPRFX
+
+    /// T: [uint, float], [ulong, double], [byte, sbyte], [ushort, short], [uint, int], [ulong, long]
+    public static unsafe Vector<T> PopCount(Vector<T2> value); // CNT // predicated, MOVPRFX
+
+    /// T: byte, ushort, uint, ulong
+    public static unsafe Vector<T> PopCount(Vector<T> value); // CNT // predicated, MOVPRFX
+
+    public static unsafe int SaturatingDecrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECH
+
+    public static unsafe long SaturatingDecrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECH
+
+    public static unsafe uint SaturatingDecrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECH
+
+    public static unsafe ulong SaturatingDecrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECH
+
+    /// T: short, ushort
+    public static unsafe Vector<T> SaturatingDecrementBy16BitElementCount(Vector<T> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECH or UQDECH // MOVPRFX
+
+    public static unsafe int SaturatingDecrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECW
+
+    public static unsafe long SaturatingDecrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECW
+
+    public static unsafe uint SaturatingDecrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECW
+
+    public static unsafe ulong SaturatingDecrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECW
+
+    /// T: int, uint
+    public static unsafe Vector<T> SaturatingDecrementBy32BitElementCount(Vector<T> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECW or UQDECW // MOVPRFX
+
+    public static unsafe int SaturatingDecrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECD
+
+    public static unsafe long SaturatingDecrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECD
+
+    public static unsafe uint SaturatingDecrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECD
+
+    public static unsafe ulong SaturatingDecrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECD
+
+    /// T: long, ulong
+    public static unsafe Vector<T> SaturatingDecrementBy64BitElementCount(Vector<T> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECD or UQDECD // MOVPRFX
+
+    public static unsafe int SaturatingDecrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECB
+
+    public static unsafe long SaturatingDecrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECB
+
+    public static unsafe uint SaturatingDecrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECB
+
+    public static unsafe ulong SaturatingDecrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECB
+
+    /// T: byte, ushort, uint, ulong
+    public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector<T> from); // SQDECP
+
+    /// T: byte, ushort, uint, ulong
+    public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector<T> from); // SQDECP
+
+    /// T: byte, ushort, uint, ulong
+    public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector<T> from); // UQDECP
+
+    /// T: byte, ushort, uint, ulong
+    public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector<T> from); // UQDECP
+
+    /// T: short, int, long, ushort, uint, ulong
+    public static unsafe Vector<T> SaturatingDecrementByActiveElementCount(Vector<T> value, Vector<T> from); // SQDECP or UQDECP // MOVPRFX
+
+    public static unsafe int SaturatingIncrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCH
+
+    public static unsafe long SaturatingIncrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCH
+
+    public static unsafe uint SaturatingIncrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCH
+
+    public static unsafe ulong SaturatingIncrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCH
+
+    /// T: short, ushort
+    public static unsafe Vector<T> SaturatingIncrementBy16BitElementCount(Vector<T> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCH or UQINCH // MOVPRFX
+
+    public static unsafe int SaturatingIncrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCW
+
+    public static unsafe long SaturatingIncrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCW
+
+    public static unsafe uint SaturatingIncrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCW
+
+    public static unsafe ulong SaturatingIncrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCW
+
+    /// T: int, uint
+    public static unsafe Vector<T> SaturatingIncrementBy32BitElementCount(Vector<T> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCW or UQINCW // MOVPRFX
+
+    public static unsafe int SaturatingIncrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCD
+
+    public static unsafe long SaturatingIncrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCD
+
+    public static unsafe uint SaturatingIncrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCD
+
+    public static unsafe ulong SaturatingIncrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCD
+
+    /// T: long, ulong
+    public static unsafe Vector<T> SaturatingIncrementBy64BitElementCount(Vector<T> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCD or UQINCD // MOVPRFX
+
+    public static unsafe int SaturatingIncrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCB
+
+    public static unsafe long SaturatingIncrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCB
+
+    public static unsafe uint SaturatingIncrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCB
+
+    public static unsafe ulong SaturatingIncrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCB
+
+    /// T: byte, ushort, uint, ulong
+    public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector<T> from); // SQINCP
+
+    /// T: byte, ushort, uint, ulong
+    public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector<T> from); // SQINCP
+
+    /// T: byte, ushort, uint, ulong
+    public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector<T> from); // UQINCP
+
+    /// T: byte, ushort, uint, ulong
+    public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector<T> from); // UQINCP
+
+    /// T: short, int, long, ushort, uint, ulong
+    public static unsafe Vector<T> SaturatingIncrementByActiveElementCount(Vector<T> value, Vector<T> from); // SQINCP or UQINCP // MOVPRFX
+
+
+    // All patterns used by PTRUE.
+    public enum SveMaskPattern : byte
+    {
+        LargestPowerOf2 = 0, // The largest power of 2.
+        VectorCount1 = 1, // 1 element.
+        VectorCount2 = 2, // 2 elements.
+        VectorCount3 = 3, // 3 elements.
+        VectorCount4 = 4, // 4 elements.
+        VectorCount5 = 5, // 5 elements.
+        VectorCount6 = 6, // 6 elements.
+        VectorCount7 = 7, // 7 elements.
+        VectorCount8 = 8, // 8 elements.
+        VectorCount16 = 9, // 16 elements.
+        VectorCount32 = 10, // 32 elements.
+        VectorCount64 = 11, // 64 elements.
+        VectorCount128 = 12, // 128 elements.
+        VectorCount256 = 13, // 256 elements.
+        LargestMultipleOf4 = 29, // The largest multiple of 4.
+        LargestMultipleOf3 = 30, // The largest multiple of 3.
+        All = 31 // All available (implicitly a multiple of two).
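+
+        // Editorial note, not generator output: a pattern caps the element count
+        // and yields 0 when it cannot be satisfied. For example, on a 128-bit SVE
+        // vector (8 halfword lanes), Count16BitElements(SveMaskPattern.VectorCount8)
+        // returns 8, while Count16BitElements(SveMaskPattern.VectorCount16) returns 0.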
+ }; + + /// total method signatures: 58 + +} + + +/// Full API +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: counting +{ + /// Count16BitElements : Count the number of 16-bit elements in a vector + + /// uint64_t svcnth_pat(enum svpattern pattern) : "CNTH Xresult, pattern" + public static unsafe ulong Count16BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + + /// Count32BitElements : Count the number of 32-bit elements in a vector + + /// uint64_t svcntw_pat(enum svpattern pattern) : "CNTW Xresult, pattern" + public static unsafe ulong Count32BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + + /// Count64BitElements : Count the number of 64-bit elements in a vector + + /// uint64_t svcntd_pat(enum svpattern pattern) : "CNTD Xresult, pattern" + public static unsafe ulong Count64BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + + /// Count8BitElements : Count the number of 8-bit elements in a vector + + /// uint64_t svcntb_pat(enum svpattern pattern) : "CNTB Xresult, pattern" + public static unsafe ulong Count8BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + + /// GetActiveElementCount : Count set predicate bits + + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) : "CNTP Xresult, Pg, Pop.B" + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); + + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) : "CNTP Xresult, Pg, Pop.B" + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); + + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) : "CNTP Xresult, Pg, Pop.B" + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); + + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) : "CNTP Xresult, Pg, Pop.B" + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); + + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) : "CNTP Xresult, Pg, Pop.B" + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); + + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) : "CNTP Xresult, Pg, Pop.B" + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); + + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) : "CNTP Xresult, Pg, Pop.B" + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); + + /// uint64_t svcntp_b16(svbool_t pg, svbool_t op) : "CNTP Xresult, Pg, Pop.H" + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); + + /// uint64_t svcntp_b32(svbool_t pg, svbool_t op) : "CNTP Xresult, Pg, Pop.S" + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); + + /// uint64_t svcntp_b64(svbool_t pg, svbool_t op) : "CNTP Xresult, Pg, Pop.D" + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); + + + /// LeadingSignCount : Count leading sign bits + + /// svuint8_t svcls[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op) : "CLS Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; CLS Zresult.B, Pg/M, Zop.B" + /// svuint8_t svcls[_s8]_x(svbool_t pg, svint8_t op) : "CLS Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; CLS Zresult.B, Pg/M, Zop.B" + /// svuint8_t svcls[_s8]_z(svbool_t pg, svint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; CLS Zresult.B, Pg/M, Zop.B" + public static unsafe Vector LeadingSignCount(Vector value); + + /// svuint16_t svcls[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op) : "CLS Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, 
Zinactive; CLS Zresult.H, Pg/M, Zop.H" + /// svuint16_t svcls[_s16]_x(svbool_t pg, svint16_t op) : "CLS Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; CLS Zresult.H, Pg/M, Zop.H" + /// svuint16_t svcls[_s16]_z(svbool_t pg, svint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; CLS Zresult.H, Pg/M, Zop.H" + public static unsafe Vector LeadingSignCount(Vector value); + + /// svuint32_t svcls[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op) : "CLS Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; CLS Zresult.S, Pg/M, Zop.S" + /// svuint32_t svcls[_s32]_x(svbool_t pg, svint32_t op) : "CLS Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; CLS Zresult.S, Pg/M, Zop.S" + /// svuint32_t svcls[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; CLS Zresult.S, Pg/M, Zop.S" + public static unsafe Vector LeadingSignCount(Vector value); + + /// svuint64_t svcls[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op) : "CLS Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; CLS Zresult.D, Pg/M, Zop.D" + /// svuint64_t svcls[_s64]_x(svbool_t pg, svint64_t op) : "CLS Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; CLS Zresult.D, Pg/M, Zop.D" + /// svuint64_t svcls[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; CLS Zresult.D, Pg/M, Zop.D" + public static unsafe Vector LeadingSignCount(Vector value); + + + /// LeadingZeroCount : Count leading zero bits + + /// svuint8_t svclz[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op) : "CLZ Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; CLZ Zresult.B, Pg/M, Zop.B" + /// svuint8_t svclz[_s8]_x(svbool_t pg, svint8_t op) : "CLZ Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; CLZ Zresult.B, Pg/M, Zop.B" + /// svuint8_t svclz[_s8]_z(svbool_t pg, svint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; CLZ Zresult.B, Pg/M, Zop.B" + public static unsafe Vector LeadingZeroCount(Vector value); + + /// svuint16_t svclz[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op) : "CLZ Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; CLZ Zresult.H, Pg/M, Zop.H" + /// svuint16_t svclz[_s16]_x(svbool_t pg, svint16_t op) : "CLZ Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; CLZ Zresult.H, Pg/M, Zop.H" + /// svuint16_t svclz[_s16]_z(svbool_t pg, svint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; CLZ Zresult.H, Pg/M, Zop.H" + public static unsafe Vector LeadingZeroCount(Vector value); + + /// svuint32_t svclz[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op) : "CLZ Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; CLZ Zresult.S, Pg/M, Zop.S" + /// svuint32_t svclz[_s32]_x(svbool_t pg, svint32_t op) : "CLZ Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; CLZ Zresult.S, Pg/M, Zop.S" + /// svuint32_t svclz[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; CLZ Zresult.S, Pg/M, Zop.S" + public static unsafe Vector LeadingZeroCount(Vector value); + + /// svuint64_t svclz[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op) : "CLZ Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; CLZ Zresult.D, Pg/M, Zop.D" + /// svuint64_t svclz[_s64]_x(svbool_t pg, svint64_t op) : "CLZ Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; CLZ Zresult.D, Pg/M, Zop.D" + /// svuint64_t svclz[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; CLZ Zresult.D, Pg/M, Zop.D" + public static unsafe Vector LeadingZeroCount(Vector value); + + /// svuint8_t svclz[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op) : "CLZ Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; CLZ Zresult.B, Pg/M, Zop.B" + 
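/// Editorial worked example, not generator output: for the byte value 0x10 (binary 0001_0000) there are three leading zero bits, so a hypothetical Sve.LeadingZeroCount(new Vector<byte>(0x10)) would hold 3 in every lane. +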
/// svuint8_t svclz[_u8]_x(svbool_t pg, svuint8_t op) : "CLZ Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; CLZ Zresult.B, Pg/M, Zop.B" + /// svuint8_t svclz[_u8]_z(svbool_t pg, svuint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; CLZ Zresult.B, Pg/M, Zop.B" + public static unsafe Vector LeadingZeroCount(Vector value); + + /// svuint16_t svclz[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) : "CLZ Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; CLZ Zresult.H, Pg/M, Zop.H" + /// svuint16_t svclz[_u16]_x(svbool_t pg, svuint16_t op) : "CLZ Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; CLZ Zresult.H, Pg/M, Zop.H" + /// svuint16_t svclz[_u16]_z(svbool_t pg, svuint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; CLZ Zresult.H, Pg/M, Zop.H" + public static unsafe Vector LeadingZeroCount(Vector value); + + /// svuint32_t svclz[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) : "CLZ Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; CLZ Zresult.S, Pg/M, Zop.S" + /// svuint32_t svclz[_u32]_x(svbool_t pg, svuint32_t op) : "CLZ Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; CLZ Zresult.S, Pg/M, Zop.S" + /// svuint32_t svclz[_u32]_z(svbool_t pg, svuint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; CLZ Zresult.S, Pg/M, Zop.S" + public static unsafe Vector LeadingZeroCount(Vector value); + + /// svuint64_t svclz[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) : "CLZ Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; CLZ Zresult.D, Pg/M, Zop.D" + /// svuint64_t svclz[_u64]_x(svbool_t pg, svuint64_t op) : "CLZ Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; CLZ Zresult.D, Pg/M, Zop.D" + /// svuint64_t svclz[_u64]_z(svbool_t pg, svuint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; CLZ Zresult.D, Pg/M, Zop.D" + public static unsafe Vector LeadingZeroCount(Vector value); + + + /// PopCount : Count nonzero bits + + /// svuint32_t svcnt[_f32]_m(svuint32_t inactive, svbool_t pg, svfloat32_t op) : "CNT Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; CNT Zresult.S, Pg/M, Zop.S" + /// svuint32_t svcnt[_f32]_x(svbool_t pg, svfloat32_t op) : "CNT Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; CNT Zresult.S, Pg/M, Zop.S" + /// svuint32_t svcnt[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; CNT Zresult.S, Pg/M, Zop.S" + public static unsafe Vector PopCount(Vector value); + + /// svuint64_t svcnt[_f64]_m(svuint64_t inactive, svbool_t pg, svfloat64_t op) : "CNT Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; CNT Zresult.D, Pg/M, Zop.D" + /// svuint64_t svcnt[_f64]_x(svbool_t pg, svfloat64_t op) : "CNT Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; CNT Zresult.D, Pg/M, Zop.D" + /// svuint64_t svcnt[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; CNT Zresult.D, Pg/M, Zop.D" + public static unsafe Vector PopCount(Vector value); + + /// svuint8_t svcnt[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op) : "CNT Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; CNT Zresult.B, Pg/M, Zop.B" + /// svuint8_t svcnt[_s8]_x(svbool_t pg, svint8_t op) : "CNT Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; CNT Zresult.B, Pg/M, Zop.B" + /// svuint8_t svcnt[_s8]_z(svbool_t pg, svint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; CNT Zresult.B, Pg/M, Zop.B" + public static unsafe Vector PopCount(Vector value); + + /// svuint16_t svcnt[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op) : "CNT Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; CNT Zresult.H, Pg/M, Zop.H" + /// svuint16_t svcnt[_s16]_x(svbool_t pg, svint16_t op) : 
"CNT Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; CNT Zresult.H, Pg/M, Zop.H" + /// svuint16_t svcnt[_s16]_z(svbool_t pg, svint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; CNT Zresult.H, Pg/M, Zop.H" + public static unsafe Vector PopCount(Vector value); + + /// svuint32_t svcnt[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op) : "CNT Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; CNT Zresult.S, Pg/M, Zop.S" + /// svuint32_t svcnt[_s32]_x(svbool_t pg, svint32_t op) : "CNT Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; CNT Zresult.S, Pg/M, Zop.S" + /// svuint32_t svcnt[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; CNT Zresult.S, Pg/M, Zop.S" + public static unsafe Vector PopCount(Vector value); + + /// svuint64_t svcnt[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op) : "CNT Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; CNT Zresult.D, Pg/M, Zop.D" + /// svuint64_t svcnt[_s64]_x(svbool_t pg, svint64_t op) : "CNT Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; CNT Zresult.D, Pg/M, Zop.D" + /// svuint64_t svcnt[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; CNT Zresult.D, Pg/M, Zop.D" + public static unsafe Vector PopCount(Vector value); + + /// svuint8_t svcnt[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op) : "CNT Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; CNT Zresult.B, Pg/M, Zop.B" + /// svuint8_t svcnt[_u8]_x(svbool_t pg, svuint8_t op) : "CNT Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; CNT Zresult.B, Pg/M, Zop.B" + /// svuint8_t svcnt[_u8]_z(svbool_t pg, svuint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; CNT Zresult.B, Pg/M, Zop.B" + public static unsafe Vector PopCount(Vector value); + + /// svuint16_t svcnt[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) : "CNT Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; CNT Zresult.H, Pg/M, Zop.H" + /// svuint16_t svcnt[_u16]_x(svbool_t pg, svuint16_t op) : "CNT Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; CNT Zresult.H, Pg/M, Zop.H" + /// svuint16_t svcnt[_u16]_z(svbool_t pg, svuint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; CNT Zresult.H, Pg/M, Zop.H" + public static unsafe Vector PopCount(Vector value); + + /// svuint32_t svcnt[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) : "CNT Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; CNT Zresult.S, Pg/M, Zop.S" + /// svuint32_t svcnt[_u32]_x(svbool_t pg, svuint32_t op) : "CNT Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; CNT Zresult.S, Pg/M, Zop.S" + /// svuint32_t svcnt[_u32]_z(svbool_t pg, svuint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; CNT Zresult.S, Pg/M, Zop.S" + public static unsafe Vector PopCount(Vector value); + + /// svuint64_t svcnt[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) : "CNT Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; CNT Zresult.D, Pg/M, Zop.D" + /// svuint64_t svcnt[_u64]_x(svbool_t pg, svuint64_t op) : "CNT Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; CNT Zresult.D, Pg/M, Zop.D" + /// svuint64_t svcnt[_u64]_z(svbool_t pg, svuint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; CNT Zresult.D, Pg/M, Zop.D" + public static unsafe Vector PopCount(Vector value); + + + /// SaturatingDecrementBy16BitElementCount : Saturating decrement by number of halfword elements + + /// int32_t svqdech_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) : "SQDECH Xtied, Wtied, pattern, MUL #imm_factor" + public static unsafe int SaturatingDecrementBy16BitElementCount(int value, [ConstantExpected] byte scale, 
[ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// int64_t svqdech_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) : "SQDECH Xtied, pattern, MUL #imm_factor" + public static unsafe long SaturatingDecrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint32_t svqdech_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) : "UQDECH Wtied, pattern, MUL #imm_factor" + public static unsafe uint SaturatingDecrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint64_t svqdech_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) : "UQDECH Xtied, pattern, MUL #imm_factor" + public static unsafe ulong SaturatingDecrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// svint16_t svqdech_pat[_s16](svint16_t op, enum svpattern pattern, uint64_t imm_factor) : "SQDECH Ztied.H, pattern, MUL #imm_factor" or "MOVPRFX Zresult, Zop; SQDECH Zresult.H, pattern, MUL #imm_factor" + public static unsafe Vector SaturatingDecrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// svuint16_t svqdech_pat[_u16](svuint16_t op, enum svpattern pattern, uint64_t imm_factor) : "UQDECH Ztied.H, pattern, MUL #imm_factor" or "MOVPRFX Zresult, Zop; UQDECH Zresult.H, pattern, MUL #imm_factor" + public static unsafe Vector SaturatingDecrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + + /// SaturatingDecrementBy32BitElementCount : Saturating decrement by number of word elements + + /// int32_t svqdecw_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) : "SQDECW Xtied, Wtied, pattern, MUL #imm_factor" + public static unsafe int SaturatingDecrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// int64_t svqdecw_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) : "SQDECW Xtied, pattern, MUL #imm_factor" + public static unsafe long SaturatingDecrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint32_t svqdecw_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) : "UQDECW Wtied, pattern, MUL #imm_factor" + public static unsafe uint SaturatingDecrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint64_t svqdecw_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) : "UQDECW Xtied, pattern, MUL #imm_factor" + public static unsafe ulong SaturatingDecrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// svint32_t svqdecw_pat[_s32](svint32_t op, enum svpattern pattern, uint64_t imm_factor) : "SQDECW Ztied.S, pattern, MUL #imm_factor" or "MOVPRFX Zresult, Zop; SQDECW Zresult.S, pattern, MUL #imm_factor" + public static unsafe Vector SaturatingDecrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// svuint32_t 
svqdecw_pat[_u32](svuint32_t op, enum svpattern pattern, uint64_t imm_factor) : "UQDECW Ztied.S, pattern, MUL #imm_factor" or "MOVPRFX Zresult, Zop; UQDECW Zresult.S, pattern, MUL #imm_factor" + public static unsafe Vector SaturatingDecrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + + /// SaturatingDecrementBy64BitElementCount : Saturating decrement by number of doubleword elements + + /// int32_t svqdecd_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) : "SQDECD Xtied, Wtied, pattern, MUL #imm_factor" + public static unsafe int SaturatingDecrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// int64_t svqdecd_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) : "SQDECD Xtied, pattern, MUL #imm_factor" + public static unsafe long SaturatingDecrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint32_t svqdecd_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) : "UQDECD Wtied, pattern, MUL #imm_factor" + public static unsafe uint SaturatingDecrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint64_t svqdecd_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) : "UQDECD Xtied, pattern, MUL #imm_factor" + public static unsafe ulong SaturatingDecrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// svint64_t svqdecd_pat[_s64](svint64_t op, enum svpattern pattern, uint64_t imm_factor) : "SQDECD Ztied.D, pattern, MUL #imm_factor" or "MOVPRFX Zresult, Zop; SQDECD Zresult.D, pattern, MUL #imm_factor" + public static unsafe Vector SaturatingDecrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// svuint64_t svqdecd_pat[_u64](svuint64_t op, enum svpattern pattern, uint64_t imm_factor) : "UQDECD Ztied.D, pattern, MUL #imm_factor" or "MOVPRFX Zresult, Zop; UQDECD Zresult.D, pattern, MUL #imm_factor" + public static unsafe Vector SaturatingDecrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + + /// SaturatingDecrementBy8BitElementCount : Saturating decrement by number of byte elements + + /// int32_t svqdecb_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) : "SQDECB Xtied, Wtied, pattern, MUL #imm_factor" + public static unsafe int SaturatingDecrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// int64_t svqdecb_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) : "SQDECB Xtied, pattern, MUL #imm_factor" + public static unsafe long SaturatingDecrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint32_t svqdecb_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) : "UQDECB Wtied, pattern, MUL #imm_factor" + public static unsafe uint SaturatingDecrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// 
uint64_t svqdecb_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) : "UQDECB Xtied, pattern, MUL #imm_factor" + public static unsafe ulong SaturatingDecrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + + /// SaturatingDecrementByActiveElementCount : Saturating decrement by active element count + + /// int32_t svqdecp[_n_s32]_b8(int32_t op, svbool_t pg) : "SQDECP Xtied, Pg.B, Wtied" + public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector from); + + /// int32_t svqdecp[_n_s32]_b16(int32_t op, svbool_t pg) : "SQDECP Xtied, Pg.H, Wtied" + public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector from); + + /// int32_t svqdecp[_n_s32]_b32(int32_t op, svbool_t pg) : "SQDECP Xtied, Pg.S, Wtied" + public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector from); + + /// int32_t svqdecp[_n_s32]_b64(int32_t op, svbool_t pg) : "SQDECP Xtied, Pg.D, Wtied" + public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector from); + + /// int64_t svqdecp[_n_s64]_b8(int64_t op, svbool_t pg) : "SQDECP Xtied, Pg.B" + public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector from); + + /// int64_t svqdecp[_n_s64]_b16(int64_t op, svbool_t pg) : "SQDECP Xtied, Pg.H" + public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector from); + + /// int64_t svqdecp[_n_s64]_b32(int64_t op, svbool_t pg) : "SQDECP Xtied, Pg.S" + public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector from); + + /// int64_t svqdecp[_n_s64]_b64(int64_t op, svbool_t pg) : "SQDECP Xtied, Pg.D" + public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector from); + + /// uint32_t svqdecp[_n_u32]_b8(uint32_t op, svbool_t pg) : "UQDECP Wtied, Pg.B" + public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector from); + + /// uint32_t svqdecp[_n_u32]_b16(uint32_t op, svbool_t pg) : "UQDECP Wtied, Pg.H" + public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector from); + + /// uint32_t svqdecp[_n_u32]_b32(uint32_t op, svbool_t pg) : "UQDECP Wtied, Pg.S" + public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector from); + + /// uint32_t svqdecp[_n_u32]_b64(uint32_t op, svbool_t pg) : "UQDECP Wtied, Pg.D" + public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector from); + + /// uint64_t svqdecp[_n_u64]_b8(uint64_t op, svbool_t pg) : "UQDECP Xtied, Pg.B" + public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector from); + + /// uint64_t svqdecp[_n_u64]_b16(uint64_t op, svbool_t pg) : "UQDECP Xtied, Pg.H" + public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector from); + + /// uint64_t svqdecp[_n_u64]_b32(uint64_t op, svbool_t pg) : "UQDECP Xtied, Pg.S" + public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector from); + + /// uint64_t svqdecp[_n_u64]_b64(uint64_t op, svbool_t pg) : "UQDECP Xtied, Pg.D" + public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector from); + + /// svint16_t svqdecp[_s16](svint16_t op, svbool_t pg) : "SQDECP Ztied.H, Pg" or "MOVPRFX Zresult, Zop; SQDECP Zresult.H, Pg" + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from); + + /// svint32_t 
svqdecp[_s32](svint32_t op, svbool_t pg) : "SQDECP Ztied.S, Pg" or "MOVPRFX Zresult, Zop; SQDECP Zresult.S, Pg" + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from); + + /// svint64_t svqdecp[_s64](svint64_t op, svbool_t pg) : "SQDECP Ztied.D, Pg" or "MOVPRFX Zresult, Zop; SQDECP Zresult.D, Pg" + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from); + + /// svuint16_t svqdecp[_u16](svuint16_t op, svbool_t pg) : "UQDECP Ztied.H, Pg" or "MOVPRFX Zresult, Zop; UQDECP Zresult.H, Pg" + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from); + + /// svuint32_t svqdecp[_u32](svuint32_t op, svbool_t pg) : "UQDECP Ztied.S, Pg" or "MOVPRFX Zresult, Zop; UQDECP Zresult.S, Pg" + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from); + + /// svuint64_t svqdecp[_u64](svuint64_t op, svbool_t pg) : "UQDECP Ztied.D, Pg" or "MOVPRFX Zresult, Zop; UQDECP Zresult.D, Pg" + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from); + + + /// SaturatingIncrementBy16BitElementCount : Saturating increment by number of halfword elements + + /// int32_t svqinch_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) : "SQINCH Xtied, Wtied, pattern, MUL #imm_factor" + public static unsafe int SaturatingIncrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// int64_t svqinch_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) : "SQINCH Xtied, pattern, MUL #imm_factor" + public static unsafe long SaturatingIncrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint32_t svqinch_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) : "UQINCH Wtied, pattern, MUL #imm_factor" + public static unsafe uint SaturatingIncrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint64_t svqinch_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) : "UQINCH Xtied, pattern, MUL #imm_factor" + public static unsafe ulong SaturatingIncrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// svint16_t svqinch_pat[_s16](svint16_t op, enum svpattern pattern, uint64_t imm_factor) : "SQINCH Ztied.H, pattern, MUL #imm_factor" or "MOVPRFX Zresult, Zop; SQINCH Zresult.H, pattern, MUL #imm_factor" + public static unsafe Vector SaturatingIncrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// svuint16_t svqinch_pat[_u16](svuint16_t op, enum svpattern pattern, uint64_t imm_factor) : "UQINCH Ztied.H, pattern, MUL #imm_factor" or "MOVPRFX Zresult, Zop; UQINCH Zresult.H, pattern, MUL #imm_factor" + public static unsafe Vector SaturatingIncrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + + /// SaturatingIncrementBy32BitElementCount : Saturating increment by number of word elements + + /// int32_t svqincw_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) : "SQINCW Xtied, Wtied, pattern, MUL #imm_factor" + public static unsafe int 
SaturatingIncrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// int64_t svqincw_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) : "SQINCW Xtied, pattern, MUL #imm_factor" + public static unsafe long SaturatingIncrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint32_t svqincw_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) : "UQINCW Wtied, pattern, MUL #imm_factor" + public static unsafe uint SaturatingIncrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint64_t svqincw_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) : "UQINCW Xtied, pattern, MUL #imm_factor" + public static unsafe ulong SaturatingIncrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// svint32_t svqincw_pat[_s32](svint32_t op, enum svpattern pattern, uint64_t imm_factor) : "SQINCW Ztied.S, pattern, MUL #imm_factor" or "MOVPRFX Zresult, Zop; SQINCW Zresult.S, pattern, MUL #imm_factor" + public static unsafe Vector SaturatingIncrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// svuint32_t svqincw_pat[_u32](svuint32_t op, enum svpattern pattern, uint64_t imm_factor) : "UQINCW Ztied.S, pattern, MUL #imm_factor" or "MOVPRFX Zresult, Zop; UQINCW Zresult.S, pattern, MUL #imm_factor" + public static unsafe Vector SaturatingIncrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + + /// SaturatingIncrementBy64BitElementCount : Saturating increment by number of doubleword elements + + /// int32_t svqincd_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) : "SQINCD Xtied, Wtied, pattern, MUL #imm_factor" + public static unsafe int SaturatingIncrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// int64_t svqincd_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) : "SQINCD Xtied, pattern, MUL #imm_factor" + public static unsafe long SaturatingIncrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint32_t svqincd_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) : "UQINCD Wtied, pattern, MUL #imm_factor" + public static unsafe uint SaturatingIncrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint64_t svqincd_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) : "UQINCD Xtied, pattern, MUL #imm_factor" + public static unsafe ulong SaturatingIncrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// svint64_t svqincd_pat[_s64](svint64_t op, enum svpattern pattern, uint64_t imm_factor) : "SQINCD Ztied.D, pattern, MUL #imm_factor" or "MOVPRFX Zresult, Zop; SQINCD Zresult.D, pattern, MUL #imm_factor" + public static unsafe Vector SaturatingIncrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, 
[ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// svuint64_t svqincd_pat[_u64](svuint64_t op, enum svpattern pattern, uint64_t imm_factor) : "UQINCD Ztied.D, pattern, MUL #imm_factor" or "MOVPRFX Zresult, Zop; UQINCD Zresult.D, pattern, MUL #imm_factor" + public static unsafe Vector SaturatingIncrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + + /// SaturatingIncrementBy8BitElementCount : Saturating increment by number of byte elements + + /// int32_t svqincb_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) : "SQINCB Xtied, Wtied, pattern, MUL #imm_factor" + public static unsafe int SaturatingIncrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// int64_t svqincb_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) : "SQINCB Xtied, pattern, MUL #imm_factor" + public static unsafe long SaturatingIncrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint32_t svqincb_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) : "UQINCB Wtied, pattern, MUL #imm_factor" + public static unsafe uint SaturatingIncrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + /// uint64_t svqincb_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) : "UQINCB Xtied, pattern, MUL #imm_factor" + public static unsafe ulong SaturatingIncrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); + + + /// SaturatingIncrementByActiveElementCount : Saturating increment by active element count + + /// int32_t svqincp[_n_s32]_b8(int32_t op, svbool_t pg) : "SQINCP Xtied, Pg.B, Wtied" + public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector from); + + /// int32_t svqincp[_n_s32]_b16(int32_t op, svbool_t pg) : "SQINCP Xtied, Pg.H, Wtied" + public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector from); + + /// int32_t svqincp[_n_s32]_b32(int32_t op, svbool_t pg) : "SQINCP Xtied, Pg.S, Wtied" + public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector from); + + /// int32_t svqincp[_n_s32]_b64(int32_t op, svbool_t pg) : "SQINCP Xtied, Pg.D, Wtied" + public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector from); + + /// int64_t svqincp[_n_s64]_b8(int64_t op, svbool_t pg) : "SQINCP Xtied, Pg.B" + public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector from); + + /// int64_t svqincp[_n_s64]_b16(int64_t op, svbool_t pg) : "SQINCP Xtied, Pg.H" + public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector from); + + /// int64_t svqincp[_n_s64]_b32(int64_t op, svbool_t pg) : "SQINCP Xtied, Pg.S" + public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector from); + + /// int64_t svqincp[_n_s64]_b64(int64_t op, svbool_t pg) : "SQINCP Xtied, Pg.D" + public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector from); + + /// uint32_t svqincp[_n_u32]_b8(uint32_t op, svbool_t pg) : "UQINCP Wtied, Pg.B" + public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector from); + + /// uint32_t 
svqincp[_n_u32]_b16(uint32_t op, svbool_t pg) : "UQINCP Wtied, Pg.H" + public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector from); + + /// uint32_t svqincp[_n_u32]_b32(uint32_t op, svbool_t pg) : "UQINCP Wtied, Pg.S" + public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector from); + + /// uint32_t svqincp[_n_u32]_b64(uint32_t op, svbool_t pg) : "UQINCP Wtied, Pg.D" + public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector from); + + /// uint64_t svqincp[_n_u64]_b8(uint64_t op, svbool_t pg) : "UQINCP Xtied, Pg.B" + public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector from); + + /// uint64_t svqincp[_n_u64]_b16(uint64_t op, svbool_t pg) : "UQINCP Xtied, Pg.H" + public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector from); + + /// uint64_t svqincp[_n_u64]_b32(uint64_t op, svbool_t pg) : "UQINCP Xtied, Pg.S" + public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector from); + + /// uint64_t svqincp[_n_u64]_b64(uint64_t op, svbool_t pg) : "UQINCP Xtied, Pg.D" + public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector from); + + /// svint16_t svqincp[_s16](svint16_t op, svbool_t pg) : "SQINCP Ztied.H, Pg" or "MOVPRFX Zresult, Zop; SQINCP Zresult.H, Pg" + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from); + + /// svint32_t svqincp[_s32](svint32_t op, svbool_t pg) : "SQINCP Ztied.S, Pg" or "MOVPRFX Zresult, Zop; SQINCP Zresult.S, Pg" + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from); + + /// svint64_t svqincp[_s64](svint64_t op, svbool_t pg) : "SQINCP Ztied.D, Pg" or "MOVPRFX Zresult, Zop; SQINCP Zresult.D, Pg" + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from); + + /// svuint16_t svqincp[_u16](svuint16_t op, svbool_t pg) : "UQINCP Ztied.H, Pg" or "MOVPRFX Zresult, Zop; UQINCP Zresult.H, Pg" + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from); + + /// svuint32_t svqincp[_u32](svuint32_t op, svbool_t pg) : "UQINCP Ztied.S, Pg" or "MOVPRFX Zresult, Zop; UQINCP Zresult.S, Pg" + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from); + + /// svuint64_t svqincp[_u64](svuint64_t op, svbool_t pg) : "UQINCP Ztied.D, Pg" or "MOVPRFX Zresult, Zop; UQINCP Zresult.D, Pg" + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from); + + + /// total method signatures: 124 + /// total method names: 19 +} + + + /// Rejected: + /// public static unsafe ulong Count16BitElements(); // svcnth + /// public static unsafe ulong Count32BitElements(); // svcntw + /// public static unsafe ulong Count64BitElements(); // svcntd + /// public static unsafe ulong Count8BitElements(); // svcntb + /// public static unsafe ulong CountElementsInAFullVector(Vector value); // svlen[_f32] + /// public static unsafe ulong CountElementsInAFullVector(Vector value); // svlen[_f64] + /// public static unsafe ulong CountElementsInAFullVector(Vector value); // svlen[_s8] + /// public static unsafe ulong CountElementsInAFullVector(Vector value); // svlen[_s16] + /// public static unsafe ulong CountElementsInAFullVector(Vector value); // svlen[_s32] + /// public static unsafe ulong CountElementsInAFullVector(Vector value); // svlen[_s64] + /// public static unsafe ulong 
CountElementsInAFullVector(Vector value); // svlen[_u8] + /// public static unsafe ulong CountElementsInAFullVector(Vector value); // svlen[_u16] + /// public static unsafe ulong CountElementsInAFullVector(Vector value); // svlen[_u32] + /// public static unsafe ulong CountElementsInAFullVector(Vector value); // svlen[_u64] + /// public static unsafe int SaturatingDecrementBy16BitElementCount(int value, ulong from); // svqdech[_n_s32] + /// public static unsafe long SaturatingDecrementBy16BitElementCount(long value, ulong from); // svqdech[_n_s64] + /// public static unsafe uint SaturatingDecrementBy16BitElementCount(uint value, ulong from); // svqdech[_n_u32] + /// public static unsafe ulong SaturatingDecrementBy16BitElementCount(ulong value, ulong from); // svqdech[_n_u64] + /// public static unsafe Vector SaturatingDecrementBy16BitElementCount(Vector value, ulong from); // svqdech[_s16] + /// public static unsafe Vector SaturatingDecrementBy16BitElementCount(Vector value, ulong from); // svqdech[_u16] + /// public static unsafe int SaturatingDecrementBy32BitElementCount(int value, ulong from); // svqdecw[_n_s32] + /// public static unsafe long SaturatingDecrementBy32BitElementCount(long value, ulong from); // svqdecw[_n_s64] + /// public static unsafe uint SaturatingDecrementBy32BitElementCount(uint value, ulong from); // svqdecw[_n_u32] + /// public static unsafe ulong SaturatingDecrementBy32BitElementCount(ulong value, ulong from); // svqdecw[_n_u64] + /// public static unsafe Vector SaturatingDecrementBy32BitElementCount(Vector value, ulong from); // svqdecw[_s32] + /// public static unsafe Vector SaturatingDecrementBy32BitElementCount(Vector value, ulong from); // svqdecw[_u32] + /// public static unsafe int SaturatingDecrementBy64BitElementCount(int value, ulong from); // svqdecd[_n_s32] + /// public static unsafe long SaturatingDecrementBy64BitElementCount(long value, ulong from); // svqdecd[_n_s64] + /// public static unsafe uint SaturatingDecrementBy64BitElementCount(uint value, ulong from); // svqdecd[_n_u32] + /// public static unsafe ulong SaturatingDecrementBy64BitElementCount(ulong value, ulong from); // svqdecd[_n_u64] + /// public static unsafe Vector SaturatingDecrementBy64BitElementCount(Vector value, ulong from); // svqdecd[_s64] + /// public static unsafe Vector SaturatingDecrementBy64BitElementCount(Vector value, ulong from); // svqdecd[_u64] + /// public static unsafe int SaturatingDecrementBy8BitElementCount(int value, ulong from); // svqdecb[_n_s32] + /// public static unsafe long SaturatingDecrementBy8BitElementCount(long value, ulong from); // svqdecb[_n_s64] + /// public static unsafe uint SaturatingDecrementBy8BitElementCount(uint value, ulong from); // svqdecb[_n_u32] + /// public static unsafe ulong SaturatingDecrementBy8BitElementCount(ulong value, ulong from); // svqdecb[_n_u64] + /// public static unsafe int SaturatingIncrementBy16BitElementCount(int value, ulong from); // svqinch[_n_s32] + /// public static unsafe long SaturatingIncrementBy16BitElementCount(long value, ulong from); // svqinch[_n_s64] + /// public static unsafe uint SaturatingIncrementBy16BitElementCount(uint value, ulong from); // svqinch[_n_u32] + /// public static unsafe ulong SaturatingIncrementBy16BitElementCount(ulong value, ulong from); // svqinch[_n_u64] + /// public static unsafe Vector SaturatingIncrementBy16BitElementCount(Vector value, ulong from); // svqinch[_s16] + /// public static unsafe Vector SaturatingIncrementBy16BitElementCount(Vector value, ulong from); // 
svqinch[_u16] + /// public static unsafe int SaturatingIncrementBy32BitElementCount(int value, ulong from); // svqincw[_n_s32] + /// public static unsafe long SaturatingIncrementBy32BitElementCount(long value, ulong from); // svqincw[_n_s64] + /// public static unsafe uint SaturatingIncrementBy32BitElementCount(uint value, ulong from); // svqincw[_n_u32] + /// public static unsafe ulong SaturatingIncrementBy32BitElementCount(ulong value, ulong from); // svqincw[_n_u64] + /// public static unsafe Vector SaturatingIncrementBy32BitElementCount(Vector value, ulong from); // svqincw[_s32] + /// public static unsafe Vector SaturatingIncrementBy32BitElementCount(Vector value, ulong from); // svqincw[_u32] + /// public static unsafe int SaturatingIncrementBy64BitElementCount(int value, ulong from); // svqincd[_n_s32] + /// public static unsafe long SaturatingIncrementBy64BitElementCount(long value, ulong from); // svqincd[_n_s64] + /// public static unsafe uint SaturatingIncrementBy64BitElementCount(uint value, ulong from); // svqincd[_n_u32] + /// public static unsafe ulong SaturatingIncrementBy64BitElementCount(ulong value, ulong from); // svqincd[_n_u64] + /// public static unsafe Vector SaturatingIncrementBy64BitElementCount(Vector value, ulong from); // svqincd[_s64] + /// public static unsafe Vector SaturatingIncrementBy64BitElementCount(Vector value, ulong from); // svqincd[_u64] + /// public static unsafe int SaturatingIncrementBy8BitElementCount(int value, ulong from); // svqincb[_n_s32] + /// public static unsafe long SaturatingIncrementBy8BitElementCount(long value, ulong from); // svqincb[_n_s64] + /// public static unsafe uint SaturatingIncrementBy8BitElementCount(uint value, ulong from); // svqincb[_n_u32] + /// public static unsafe ulong SaturatingIncrementBy8BitElementCount(ulong value, ulong from); // svqincb[_n_u64] + /// Total Rejected: 58 + + /// Total ACLE covered across API: 226 + diff --git a/sve_api/out_api/apiraw_FEAT_SVE__firstfaulting.cs b/sve_api/out_api/apiraw_FEAT_SVE__firstfaulting.cs new file mode 100644 index 0000000000000..c35310d9a2517 --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE__firstfaulting.cs @@ -0,0 +1,827 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: firstfaulting +{ + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1B + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets); // LDFF1B + + /// T: [float, uint], [int, uint], [double, ulong], [long, ulong] + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses); // LDFF1W or LDFF1D + + /// T: uint, ulong + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses); // LDFF1W or LDFF1D + + /// T: [float, int], [uint, int], [float, uint], [int, uint], [double, long], [ulong, long], [double, ulong], [long, ulong] + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, T* address, Vector indices); // LDFF1W or LDFF1D + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, T* address, Vector indices); // LDFF1W or LDFF1D + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + 
public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SH + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices); // LDFF1SH + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets); // LDFF1SH + + /// T: [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SW + + /// T: [long, long], [int, int], [ulong, long], [uint, int], [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices); // LDFF1SW + + /// T: [long, long], [int, int], [ulong, long], [uint, int], [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets); // LDFF1SW + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SB + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets); // LDFF1SB + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets); // LDFF1H + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1H + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices); // LDFF1H + + /// T: [long, long], [int, int], [ulong, long], [uint, int], [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets); // LDFF1W + + /// T: [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1W + + /// T: [long, long], [int, int], [ulong, long], [uint, int], [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices); // LDFF1W + + /// T: [float, int], [uint, int], [float, uint], [int, uint], [double, long], [ulong, long], [double, ulong], [long, ulong] + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, T* address, Vector offsets); // LDFF1W or LDFF1D + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, T* address, Vector offsets); // 
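LDFF1W or LDFF1D
+
+    /// Illustrative usage (a sketch in this listing's shorthand, where `Vector` stands
+    /// for Vector<T>; `mask`, `address` and `indices` are hypothetical): a first-faulting
+    /// gather loads elements until the first faulting lane, recording the outcome in the
+    /// FFR instead of throwing, so the caller can consume whatever did load.
+    ///
+    ///     Vector values = Sve.GatherVectorFirstFaulting(mask, address, indices);
+    ///     Vector loaded = Sve.GetFfr();   // true for each lane that loaded successfully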
+ + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector GetFfr(); // RDFFR // predicated + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address); // LDFF1B + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, T* address); // LDFF1W or LDFF1D or LDFF1B or LDFF1H + + /// T: int, long, uint, ulong + public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(Vector mask, short* address); // LDFF1SH + + /// T: long, ulong + public static unsafe Vector LoadVectorInt32SignExtendFirstFaulting(Vector mask, int* address); // LDFF1SW + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address); // LDFF1SB + + /// T: int, long, uint, ulong + public static unsafe Vector LoadVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address); // LDFF1H + + /// T: long, ulong + public static unsafe Vector LoadVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address); // LDFF1W + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void SetFfr(Vector value); // WRFFR + + /// total method signatures: 31 + +} + + +/// Full API +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: firstfaulting +{ + /// GatherVectorByteZeroExtendFirstFaulting : Load 8-bit data and zero-extend, first-faulting + + /// svint32_t svldff1ub_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LDFF1B Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses); + + /// svuint32_t svldff1ub_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LDFF1B Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses); + + /// svint64_t svldff1ub_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDFF1B Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses); + + /// svuint64_t svldff1ub_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDFF1B Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses); + + /// svint32_t svldff1ub_gather_[s32]offset_s32(svbool_t pg, const uint8_t *base, svint32_t offsets) : "LDFF1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets); + + /// svuint32_t svldff1ub_gather_[s32]offset_u32(svbool_t pg, const uint8_t *base, svint32_t offsets) : "LDFF1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets); + + /// svint32_t svldff1ub_gather_[u32]offset_s32(svbool_t pg, const uint8_t *base, svuint32_t offsets) : "LDFF1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets); + + /// svuint32_t svldff1ub_gather_[u32]offset_u32(svbool_t pg, const uint8_t *base, svuint32_t offsets) : "LDFF1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte*
address, Vector offsets); + + /// svint64_t svldff1ub_gather_[s64]offset_s64(svbool_t pg, const uint8_t *base, svint64_t offsets) : "LDFF1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets); + + /// svuint64_t svldff1ub_gather_[s64]offset_u64(svbool_t pg, const uint8_t *base, svint64_t offsets) : "LDFF1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets); + + /// svint64_t svldff1ub_gather_[u64]offset_s64(svbool_t pg, const uint8_t *base, svuint64_t offsets) : "LDFF1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets); + + /// svuint64_t svldff1ub_gather_[u64]offset_u64(svbool_t pg, const uint8_t *base, svuint64_t offsets) : "LDFF1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets); + + + /// GatherVectorFirstFaulting : Unextended load, first-faulting + + /// svfloat32_t svldff1_gather[_u32base]_f32(svbool_t pg, svuint32_t bases) : "LDFF1W Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses); + + /// svint32_t svldff1_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LDFF1W Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses); + + /// svuint32_t svldff1_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LDFF1W Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses); + + /// svfloat64_t svldff1_gather[_u64base]_f64(svbool_t pg, svuint64_t bases) : "LDFF1D Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses); + + /// svint64_t svldff1_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDFF1D Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses); + + /// svuint64_t svldff1_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDFF1D Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses); + + /// svfloat32_t svldff1_gather_[s32]index[_f32](svbool_t pg, const float32_t *base, svint32_t indices) : "LDFF1W Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #2]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, float* address, Vector indices); + + /// svint32_t svldff1_gather_[s32]index[_s32](svbool_t pg, const int32_t *base, svint32_t indices) : "LDFF1W Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #2]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, int* address, Vector indices); + + /// svuint32_t svldff1_gather_[s32]index[_u32](svbool_t pg, const uint32_t *base, svint32_t indices) : "LDFF1W Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #2]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, uint* address, Vector indices); + + /// svfloat32_t svldff1_gather_[u32]index[_f32](svbool_t pg, const float32_t *base, svuint32_t indices) : "LDFF1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, float* address, Vector indices); + + /// svint32_t svldff1_gather_[u32]index[_s32](svbool_t pg, const 
int32_t *base, svuint32_t indices) : "LDFF1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, int* address, Vector indices); + + /// svuint32_t svldff1_gather_[u32]index[_u32](svbool_t pg, const uint32_t *base, svuint32_t indices) : "LDFF1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, uint* address, Vector indices); + + /// svfloat64_t svldff1_gather_[s64]index[_f64](svbool_t pg, const float64_t *base, svint64_t indices) : "LDFF1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, double* address, Vector indices); + + /// svint64_t svldff1_gather_[s64]index[_s64](svbool_t pg, const int64_t *base, svint64_t indices) : "LDFF1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, long* address, Vector indices); + + /// svuint64_t svldff1_gather_[s64]index[_u64](svbool_t pg, const uint64_t *base, svint64_t indices) : "LDFF1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, ulong* address, Vector indices); + + /// svfloat64_t svldff1_gather_[u64]index[_f64](svbool_t pg, const float64_t *base, svuint64_t indices) : "LDFF1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, double* address, Vector indices); + + /// svint64_t svldff1_gather_[u64]index[_s64](svbool_t pg, const int64_t *base, svuint64_t indices) : "LDFF1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, long* address, Vector indices); + + /// svuint64_t svldff1_gather_[u64]index[_u64](svbool_t pg, const uint64_t *base, svuint64_t indices) : "LDFF1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]" + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, ulong* address, Vector indices); + + + /// GatherVectorInt16SignExtendFirstFaulting : Load 16-bit data and sign-extend, first-faulting + + /// svint32_t svldff1sh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LDFF1SH Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses); + + /// svuint32_t svldff1sh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LDFF1SH Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses); + + /// svint64_t svldff1sh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDFF1SH Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses); + + /// svuint64_t svldff1sh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDFF1SH Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses); + + /// svint32_t svldff1sh_gather_[s32]index_s32(svbool_t pg, const int16_t *base, svint32_t indices) : "LDFF1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]" + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices); + + /// svuint32_t svldff1sh_gather_[s32]index_u32(svbool_t pg, const int16_t *base, svint32_t indices) : "LDFF1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]" + public static unsafe Vector 
GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices); + + /// svint32_t svldff1sh_gather_[u32]index_s32(svbool_t pg, const int16_t *base, svuint32_t indices) : "LDFF1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]" + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices); + + /// svuint32_t svldff1sh_gather_[u32]index_u32(svbool_t pg, const int16_t *base, svuint32_t indices) : "LDFF1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]" + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices); + + /// svint64_t svldff1sh_gather_[s64]index_s64(svbool_t pg, const int16_t *base, svint64_t indices) : "LDFF1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices); + + /// svuint64_t svldff1sh_gather_[s64]index_u64(svbool_t pg, const int16_t *base, svint64_t indices) : "LDFF1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices); + + /// svint64_t svldff1sh_gather_[u64]index_s64(svbool_t pg, const int16_t *base, svuint64_t indices) : "LDFF1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices); + + /// svuint64_t svldff1sh_gather_[u64]index_u64(svbool_t pg, const int16_t *base, svuint64_t indices) : "LDFF1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices); + + + /// GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting : Load 16-bit data and sign-extend, first-faulting + + /// svint32_t svldff1sh_gather_[s32]offset_s32(svbool_t pg, const int16_t *base, svint32_t offsets) : "LDFF1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets); + + /// svuint32_t svldff1sh_gather_[s32]offset_u32(svbool_t pg, const int16_t *base, svint32_t offsets) : "LDFF1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets); + + /// svint32_t svldff1sh_gather_[u32]offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets) : "LDFF1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets); + + /// svuint32_t svldff1sh_gather_[u32]offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets) : "LDFF1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets); + + /// svint64_t svldff1sh_gather_[s64]offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets) : "LDFF1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets); + + /// svuint64_t svldff1sh_gather_[s64]offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets) : "LDFF1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector 
GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets); + + /// svint64_t svldff1sh_gather_[u64]offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets) : "LDFF1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets); + + /// svuint64_t svldff1sh_gather_[u64]offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets) : "LDFF1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets); + + + /// GatherVectorInt32SignExtendFirstFaulting : Load 32-bit data and sign-extend, first-faulting + + /// svint64_t svldff1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDFF1SW Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses); + + /// svint64_t svldff1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDFF1SW Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses); + + /// svuint64_t svldff1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDFF1SW Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses); + + /// svuint64_t svldff1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDFF1SW Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses); + + /// svint64_t svldff1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices); + + /// svint64_t svldff1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices); + + /// svuint64_t svldff1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices); + + /// svuint64_t svldff1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices); + + /// svint64_t svldff1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices); + + /// svint64_t svldff1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices); + + /// svuint64_t svldff1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector 
GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices); + + /// svuint64_t svldff1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices); + + + /// GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting : Load 32-bit data and sign-extend, first-faulting + + /// svint64_t svldff1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets); + + /// svint64_t svldff1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets); + + /// svuint64_t svldff1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets); + + /// svuint64_t svldff1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets); + + /// svint64_t svldff1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets); + + /// svint64_t svldff1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets); + + /// svuint64_t svldff1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets); + + /// svuint64_t svldff1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) : "LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets); + + + /// GatherVectorSByteSignExtendFirstFaulting : Load 8-bit data and sign-extend, first-faulting + + /// svint32_t svldff1sb_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LDFF1SB Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses); + + /// svuint32_t svldff1sb_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LDFF1SB Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses); + + /// svint64_t svldff1sb_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDFF1SB Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses); + + /// svuint64_t 
svldff1sb_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDFF1SB Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses); + + /// svint32_t svldff1sb_gather_[s32]offset_s32(svbool_t pg, const int8_t *base, svint32_t offsets) : "LDFF1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets); + + /// svuint32_t svldff1sb_gather_[s32]offset_u32(svbool_t pg, const int8_t *base, svint32_t offsets) : "LDFF1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets); + + /// svint32_t svldff1sb_gather_[u32]offset_s32(svbool_t pg, const int8_t *base, svuint32_t offsets) : "LDFF1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets); + + /// svuint32_t svldff1sb_gather_[u32]offset_u32(svbool_t pg, const int8_t *base, svuint32_t offsets) : "LDFF1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets); + + /// svint64_t svldff1sb_gather_[s64]offset_s64(svbool_t pg, const int8_t *base, svint64_t offsets) : "LDFF1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets); + + /// svuint64_t svldff1sb_gather_[s64]offset_u64(svbool_t pg, const int8_t *base, svint64_t offsets) : "LDFF1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets); + + /// svint64_t svldff1sb_gather_[u64]offset_s64(svbool_t pg, const int8_t *base, svuint64_t offsets) : "LDFF1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets); + + /// svuint64_t svldff1sb_gather_[u64]offset_u64(svbool_t pg, const int8_t *base, svuint64_t offsets) : "LDFF1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets); + + + /// GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting : Load 16-bit data and zero-extend, first-faulting + + /// svint32_t svldff1uh_gather_[s32]offset_s32(svbool_t pg, const uint16_t *base, svint32_t offsets) : "LDFF1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets); + + /// svuint32_t svldff1uh_gather_[s32]offset_u32(svbool_t pg, const uint16_t *base, svint32_t offsets) : "LDFF1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets); + + /// svint32_t svldff1uh_gather_[u32]offset_s32(svbool_t pg, const uint16_t *base, svuint32_t offsets) : "LDFF1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets); + + /// svuint32_t svldff1uh_gather_[u32]offset_u32(svbool_t pg, const uint16_t *base, svuint32_t offsets) : "LDFF1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + 
public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets); + + /// svint64_t svldff1uh_gather_[s64]offset_s64(svbool_t pg, const uint16_t *base, svint64_t offsets) : "LDFF1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets); + + /// svuint64_t svldff1uh_gather_[s64]offset_u64(svbool_t pg, const uint16_t *base, svint64_t offsets) : "LDFF1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets); + + /// svint64_t svldff1uh_gather_[u64]offset_s64(svbool_t pg, const uint16_t *base, svuint64_t offsets) : "LDFF1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets); + + /// svuint64_t svldff1uh_gather_[u64]offset_u64(svbool_t pg, const uint16_t *base, svuint64_t offsets) : "LDFF1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets); + + + /// GatherVectorUInt16ZeroExtendFirstFaulting : Load 16-bit data and zero-extend, first-faulting + + /// svint32_t svldff1uh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LDFF1H Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses); + + /// svuint32_t svldff1uh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LDFF1H Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses); + + /// svint64_t svldff1uh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDFF1H Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses); + + /// svuint64_t svldff1uh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDFF1H Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses); + + /// svint32_t svldff1uh_gather_[s32]index_s32(svbool_t pg, const uint16_t *base, svint32_t indices) : "LDFF1H Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices); + + /// svuint32_t svldff1uh_gather_[s32]index_u32(svbool_t pg, const uint16_t *base, svint32_t indices) : "LDFF1H Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices); + + /// svint32_t svldff1uh_gather_[u32]index_s32(svbool_t pg, const uint16_t *base, svuint32_t indices) : "LDFF1H Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices); + + /// svuint32_t svldff1uh_gather_[u32]index_u32(svbool_t pg, const uint16_t *base, svuint32_t indices) : "LDFF1H Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices); + + /// svint64_t svldff1uh_gather_[s64]index_s64(svbool_t pg, const uint16_t *base, svint64_t indices) : "LDFF1H Zresult.D, Pg/Z, 
[Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices); + + /// svuint64_t svldff1uh_gather_[s64]index_u64(svbool_t pg, const uint16_t *base, svint64_t indices) : "LDFF1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices); + + /// svint64_t svldff1uh_gather_[u64]index_s64(svbool_t pg, const uint16_t *base, svuint64_t indices) : "LDFF1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices); + + /// svuint64_t svldff1uh_gather_[u64]index_u64(svbool_t pg, const uint16_t *base, svuint64_t indices) : "LDFF1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices); + + + /// GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting : Load 32-bit data and zero-extend, first-faulting + + /// svint64_t svldff1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets); + + /// svint64_t svldff1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets); + + /// svuint64_t svldff1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets); + + /// svuint64_t svldff1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets); + + /// svint64_t svldff1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets); + + /// svint64_t svldff1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets); + + /// svuint64_t svldff1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets); + + /// svuint64_t svldff1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets); + + + /// GatherVectorUInt32ZeroExtendFirstFaulting : Load 32-bit data and zero-extend, first-faulting + + /// svint64_t 
svldff1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDFF1W Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses); + + /// svint64_t svldff1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LDFF1W Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses); + + /// svuint64_t svldff1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDFF1W Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses); + + /// svuint64_t svldff1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LDFF1W Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses); + + /// svint64_t svldff1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices); + + /// svint64_t svldff1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices); + + /// svuint64_t svldff1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices); + + /// svuint64_t svldff1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices); + + /// svint64_t svldff1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices); + + /// svint64_t svldff1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices); + + /// svuint64_t svldff1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices); + + /// svuint64_t svldff1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) : "LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices); + + + /// GatherVectorWithByteOffsetFirstFaulting : Unextended load, first-faulting + + /// svfloat32_t svldff1_gather_[s32]offset[_f32](svbool_t pg, const float32_t *base, svint32_t offsets) : "LDFF1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, float* address, Vector offsets); + + /// svint32_t svldff1_gather_[s32]offset[_s32](svbool_t pg, const 
int32_t *base, svint32_t offsets) : "LDFF1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, int* address, Vector offsets); + + /// svuint32_t svldff1_gather_[s32]offset[_u32](svbool_t pg, const uint32_t *base, svint32_t offsets) : "LDFF1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, uint* address, Vector offsets); + + /// svfloat32_t svldff1_gather_[u32]offset[_f32](svbool_t pg, const float32_t *base, svuint32_t offsets) : "LDFF1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, float* address, Vector offsets); + + /// svint32_t svldff1_gather_[u32]offset[_s32](svbool_t pg, const int32_t *base, svuint32_t offsets) : "LDFF1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, int* address, Vector offsets); + + /// svuint32_t svldff1_gather_[u32]offset[_u32](svbool_t pg, const uint32_t *base, svuint32_t offsets) : "LDFF1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, uint* address, Vector offsets); + + /// svfloat64_t svldff1_gather_[s64]offset[_f64](svbool_t pg, const float64_t *base, svint64_t offsets) : "LDFF1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, double* address, Vector offsets); + + /// svint64_t svldff1_gather_[s64]offset[_s64](svbool_t pg, const int64_t *base, svint64_t offsets) : "LDFF1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, long* address, Vector offsets); + + /// svuint64_t svldff1_gather_[s64]offset[_u64](svbool_t pg, const uint64_t *base, svint64_t offsets) : "LDFF1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, ulong* address, Vector offsets); + + /// svfloat64_t svldff1_gather_[u64]offset[_f64](svbool_t pg, const float64_t *base, svuint64_t offsets) : "LDFF1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, double* address, Vector offsets); + + /// svint64_t svldff1_gather_[u64]offset[_s64](svbool_t pg, const int64_t *base, svuint64_t offsets) : "LDFF1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, long* address, Vector offsets); + + /// svuint64_t svldff1_gather_[u64]offset[_u64](svbool_t pg, const uint64_t *base, svuint64_t offsets) : "LDFF1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, ulong* address, Vector offsets); + + + /// GetFfr : Read FFR, returning predicate of successfully loaded elements + + /// svbool_t svrdffr() : "RDFFR Presult.B" + /// svbool_t svrdffr_z(svbool_t pg) : "RDFFR Presult.B, Pg/Z" + public static unsafe Vector GetFfr(); + + /// svbool_t svrdffr() : "RDFFR Presult.B" + /// svbool_t svrdffr_z(svbool_t pg) : "RDFFR Presult.B, Pg/Z" + public static unsafe Vector GetFfr(); + + /// svbool_t svrdffr() : "RDFFR Presult.B" + /// svbool_t svrdffr_z(svbool_t pg) : "RDFFR Presult.B, Pg/Z" + public static unsafe Vector GetFfr(); + + /// svbool_t svrdffr() : "RDFFR Presult.B" + /// svbool_t svrdffr_z(svbool_t pg) : "RDFFR
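Presult.B, Pg/Z" + public static unsafe Vector GetFfr();
+
+    /// Illustrative usage (a sketch in the listing's shorthand; `mask` and `address`
+    /// are hypothetical): after a first-faulting load, the FFR holds a true lane for
+    /// every element that loaded successfully, and false from the first fault onwards.
+    ///
+    ///     Vector data = Sve.LoadVectorFirstFaulting(mask, address);
+    ///     Vector loaded = Sve.GetFfr();   // `data` is valid only where `loaded` is true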
Presult.B, Pg/Z" + public static unsafe Vector GetFfr(); + + /// svbool_t svrdffr() : "RDFFR Presult.B" + /// svbool_t svrdffr_z(svbool_t pg) : "RDFFR Presult.B, Pg/Z" + public static unsafe Vector GetFfr(); + + /// svbool_t svrdffr() : "RDFFR Presult.B" + /// svbool_t svrdffr_z(svbool_t pg) : "RDFFR Presult.B, Pg/Z" + public static unsafe Vector GetFfr(); + + /// svbool_t svrdffr() : "RDFFR Presult.B" + /// svbool_t svrdffr_z(svbool_t pg) : "RDFFR Presult.B, Pg/Z" + public static unsafe Vector GetFfr(); + + /// svbool_t svrdffr() : "RDFFR Presult.B" + /// svbool_t svrdffr_z(svbool_t pg) : "RDFFR Presult.B, Pg/Z" + public static unsafe Vector GetFfr(); + + + /// LoadVectorByteZeroExtendFirstFaulting : Load 8-bit data and zero-extend, first-faulting + + /// svint16_t svldff1ub_s16(svbool_t pg, const uint8_t *base) : "LDFF1B Zresult.H, Pg/Z, [Xarray, Xindex]" or "LDFF1B Zresult.H, Pg/Z, [Xbase, XZR]" + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address); + + /// svint32_t svldff1ub_s32(svbool_t pg, const uint8_t *base) : "LDFF1B Zresult.S, Pg/Z, [Xarray, Xindex]" or "LDFF1B Zresult.S, Pg/Z, [Xbase, XZR]" + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address); + + /// svint64_t svldff1ub_s64(svbool_t pg, const uint8_t *base) : "LDFF1B Zresult.D, Pg/Z, [Xarray, Xindex]" or "LDFF1B Zresult.D, Pg/Z, [Xbase, XZR]" + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address); + + /// svuint16_t svldff1ub_u16(svbool_t pg, const uint8_t *base) : "LDFF1B Zresult.H, Pg/Z, [Xarray, Xindex]" or "LDFF1B Zresult.H, Pg/Z, [Xbase, XZR]" + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address); + + /// svuint32_t svldff1ub_u32(svbool_t pg, const uint8_t *base) : "LDFF1B Zresult.S, Pg/Z, [Xarray, Xindex]" or "LDFF1B Zresult.S, Pg/Z, [Xbase, XZR]" + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address); + + /// svuint64_t svldff1ub_u64(svbool_t pg, const uint8_t *base) : "LDFF1B Zresult.D, Pg/Z, [Xarray, Xindex]" or "LDFF1B Zresult.D, Pg/Z, [Xbase, XZR]" + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address); + + + /// LoadVectorFirstFaulting : Unextended load, first-faulting + + /// svfloat32_t svldff1[_f32](svbool_t pg, const float32_t *base) : "LDFF1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LDFF1W Zresult.S, Pg/Z, [Xbase, XZR, LSL #2]" + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, float* address); + + /// svfloat64_t svldff1[_f64](svbool_t pg, const float64_t *base) : "LDFF1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LDFF1D Zresult.D, Pg/Z, [Xbase, XZR, LSL #3]" + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, double* address); + + /// svint8_t svldff1[_s8](svbool_t pg, const int8_t *base) : "LDFF1B Zresult.B, Pg/Z, [Xarray, Xindex]" or "LDFF1B Zresult.B, Pg/Z, [Xbase, XZR]" + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, sbyte* address); + + /// svint16_t svldff1[_s16](svbool_t pg, const int16_t *base) : "LDFF1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1H Zresult.H, Pg/Z, [Xbase, XZR, LSL #1]" + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, short* address); + + /// svint32_t svldff1[_s32](svbool_t pg, const int32_t *base) : "LDFF1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LDFF1W Zresult.S, Pg/Z, [Xbase, XZR, LSL #2]" + public static unsafe Vector 
+ + /// svint64_t svldff1[_s64](svbool_t pg, const int64_t *base) : "LDFF1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LDFF1D Zresult.D, Pg/Z, [Xbase, XZR, LSL #3]" + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, long* address); + + /// svuint8_t svldff1[_u8](svbool_t pg, const uint8_t *base) : "LDFF1B Zresult.B, Pg/Z, [Xarray, Xindex]" or "LDFF1B Zresult.B, Pg/Z, [Xbase, XZR]" + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, byte* address); + + /// svuint16_t svldff1[_u16](svbool_t pg, const uint16_t *base) : "LDFF1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1H Zresult.H, Pg/Z, [Xbase, XZR, LSL #1]" + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, ushort* address); + + /// svuint32_t svldff1[_u32](svbool_t pg, const uint32_t *base) : "LDFF1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LDFF1W Zresult.S, Pg/Z, [Xbase, XZR, LSL #2]" + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, uint* address); + + /// svuint64_t svldff1[_u64](svbool_t pg, const uint64_t *base) : "LDFF1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LDFF1D Zresult.D, Pg/Z, [Xbase, XZR, LSL #3]" + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, ulong* address); + + + /// LoadVectorInt16SignExtendFirstFaulting : Load 16-bit data and sign-extend, first-faulting + + /// svint32_t svldff1sh_s32(svbool_t pg, const int16_t *base) : "LDFF1SH Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1SH Zresult.S, Pg/Z, [Xbase, XZR, LSL #1]" + public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(Vector mask, short* address); + + /// svint64_t svldff1sh_s64(svbool_t pg, const int16_t *base) : "LDFF1SH Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1SH Zresult.D, Pg/Z, [Xbase, XZR, LSL #1]" + public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(Vector mask, short* address); + + /// svuint32_t svldff1sh_u32(svbool_t pg, const int16_t *base) : "LDFF1SH Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1SH Zresult.S, Pg/Z, [Xbase, XZR, LSL #1]" + public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(Vector mask, short* address); + + /// svuint64_t svldff1sh_u64(svbool_t pg, const int16_t *base) : "LDFF1SH Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1SH Zresult.D, Pg/Z, [Xbase, XZR, LSL #1]" + public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(Vector mask, short* address); + + + /// LoadVectorInt32SignExtendFirstFaulting : Load 32-bit data and sign-extend, first-faulting + + /// svint64_t svldff1sw_s64(svbool_t pg, const int32_t *base) : "LDFF1SW Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]" or "LDFF1SW Zresult.D, Pg/Z, [Xbase, XZR, LSL #2]" + public static unsafe Vector LoadVectorInt32SignExtendFirstFaulting(Vector mask, int* address); + + /// svuint64_t svldff1sw_u64(svbool_t pg, const int32_t *base) : "LDFF1SW Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]" or "LDFF1SW Zresult.D, Pg/Z, [Xbase, XZR, LSL #2]" + public static unsafe Vector LoadVectorInt32SignExtendFirstFaulting(Vector mask, int* address); + + + /// LoadVectorSByteSignExtendFirstFaulting : Load 8-bit data and sign-extend, first-faulting + + /// svint16_t svldff1sb_s16(svbool_t pg, const int8_t *base) : "LDFF1SB Zresult.H, Pg/Z, [Xarray, Xindex]" or "LDFF1SB Zresult.H, Pg/Z, [Xbase, XZR]" + public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address); + + /// svint32_t svldff1sb_s32(svbool_t pg, const int8_t *base) : "LDFF1SB
+
+ /// LoadVectorInt16SignExtendFirstFaulting : Load 16-bit data and sign-extend, first-faulting
+
+ /// svint32_t svldff1sh_s32(svbool_t pg, const int16_t *base) : "LDFF1SH Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1SH Zresult.S, Pg/Z, [Xbase, XZR, LSL #1]"
+ public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(Vector mask, short* address);
+
+ /// svint64_t svldff1sh_s64(svbool_t pg, const int16_t *base) : "LDFF1SH Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1SH Zresult.D, Pg/Z, [Xbase, XZR, LSL #1]"
+ public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(Vector mask, short* address);
+
+ /// svuint32_t svldff1sh_u32(svbool_t pg, const int16_t *base) : "LDFF1SH Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1SH Zresult.S, Pg/Z, [Xbase, XZR, LSL #1]"
+ public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(Vector mask, short* address);
+
+ /// svuint64_t svldff1sh_u64(svbool_t pg, const int16_t *base) : "LDFF1SH Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1SH Zresult.D, Pg/Z, [Xbase, XZR, LSL #1]"
+ public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(Vector mask, short* address);
+
+
+ /// LoadVectorInt32SignExtendFirstFaulting : Load 32-bit data and sign-extend, first-faulting
+
+ /// svint64_t svldff1sw_s64(svbool_t pg, const int32_t *base) : "LDFF1SW Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]" or "LDFF1SW Zresult.D, Pg/Z, [Xbase, XZR, LSL #2]"
+ public static unsafe Vector LoadVectorInt32SignExtendFirstFaulting(Vector mask, int* address);
+
+ /// svuint64_t svldff1sw_u64(svbool_t pg, const int32_t *base) : "LDFF1SW Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]" or "LDFF1SW Zresult.D, Pg/Z, [Xbase, XZR, LSL #2]"
+ public static unsafe Vector LoadVectorInt32SignExtendFirstFaulting(Vector mask, int* address);
+
+
+ /// LoadVectorSByteSignExtendFirstFaulting : Load 8-bit data and sign-extend, first-faulting
+
+ /// svint16_t svldff1sb_s16(svbool_t pg, const int8_t *base) : "LDFF1SB Zresult.H, Pg/Z, [Xarray, Xindex]" or "LDFF1SB Zresult.H, Pg/Z, [Xbase, XZR]"
+ public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address);
+
+ /// svint32_t svldff1sb_s32(svbool_t pg, const int8_t *base) : "LDFF1SB Zresult.S, Pg/Z, [Xarray, Xindex]" or "LDFF1SB Zresult.S, Pg/Z, [Xbase, XZR]"
+ public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address);
+
+ /// svint64_t svldff1sb_s64(svbool_t pg, const int8_t *base) : "LDFF1SB Zresult.D, Pg/Z, [Xarray, Xindex]" or "LDFF1SB Zresult.D, Pg/Z, [Xbase, XZR]"
+ public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address);
+
+ /// svuint16_t svldff1sb_u16(svbool_t pg, const int8_t *base) : "LDFF1SB Zresult.H, Pg/Z, [Xarray, Xindex]" or "LDFF1SB Zresult.H, Pg/Z, [Xbase, XZR]"
+ public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address);
+
+ /// svuint32_t svldff1sb_u32(svbool_t pg, const int8_t *base) : "LDFF1SB Zresult.S, Pg/Z, [Xarray, Xindex]" or "LDFF1SB Zresult.S, Pg/Z, [Xbase, XZR]"
+ public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address);
+
+ /// svuint64_t svldff1sb_u64(svbool_t pg, const int8_t *base) : "LDFF1SB Zresult.D, Pg/Z, [Xarray, Xindex]" or "LDFF1SB Zresult.D, Pg/Z, [Xbase, XZR]"
+ public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address);
+
+
+ /// LoadVectorUInt16ZeroExtendFirstFaulting : Load 16-bit data and zero-extend, first-faulting
+
+ /// svint32_t svldff1uh_s32(svbool_t pg, const uint16_t *base) : "LDFF1H Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1H Zresult.S, Pg/Z, [Xbase, XZR, LSL #1]"
+ public static unsafe Vector LoadVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address);
+
+ /// svint64_t svldff1uh_s64(svbool_t pg, const uint16_t *base) : "LDFF1H Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1H Zresult.D, Pg/Z, [Xbase, XZR, LSL #1]"
+ public static unsafe Vector LoadVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address);
+
+ /// svuint32_t svldff1uh_u32(svbool_t pg, const uint16_t *base) : "LDFF1H Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1H Zresult.S, Pg/Z, [Xbase, XZR, LSL #1]"
+ public static unsafe Vector LoadVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address);
+
+ /// svuint64_t svldff1uh_u64(svbool_t pg, const uint16_t *base) : "LDFF1H Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDFF1H Zresult.D, Pg/Z, [Xbase, XZR, LSL #1]"
+ public static unsafe Vector LoadVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address);
+
+
+ /// LoadVectorUInt32ZeroExtendFirstFaulting : Load 32-bit data and zero-extend, first-faulting
+
+ /// svint64_t svldff1uw_s64(svbool_t pg, const uint32_t *base) : "LDFF1W Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]" or "LDFF1W Zresult.D, Pg/Z, [Xbase, XZR, LSL #2]"
+ public static unsafe Vector LoadVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address);
+
+ /// svuint64_t svldff1uw_u64(svbool_t pg, const uint32_t *base) : "LDFF1W Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]" or "LDFF1W Zresult.D, Pg/Z, [Xbase, XZR, LSL #2]"
+ public static unsafe Vector LoadVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address);
+
+
+ /// SetFfr : Write to the first-fault register
+
+ /// void svwrffr(svbool_t op) : "WRFFR Pop.B"
+ public static unsafe void SetFfr(Vector value);
+
+ /// void svwrffr(svbool_t op) : "WRFFR Pop.B"
+ public static unsafe void SetFfr(Vector value);
+
+ /// void svwrffr(svbool_t op) : "WRFFR Pop.B"
+ public static unsafe void SetFfr(Vector value);
+
+ /// void svwrffr(svbool_t op) : "WRFFR Pop.B"
+ public static unsafe void SetFfr(Vector value);
+
+ /// void svwrffr(svbool_t op) : "WRFFR Pop.B"
+ public static unsafe void
SetFfr(Vector value); + + /// void svwrffr(svbool_t op) : "WRFFR Pop.B" + public static unsafe void SetFfr(Vector value); + + /// void svwrffr(svbool_t op) : "WRFFR Pop.B" + public static unsafe void SetFfr(Vector value); + + /// void svwrffr(svbool_t op) : "WRFFR Pop.B" + public static unsafe void SetFfr(Vector value); + + + /// total method signatures: 184 + /// total method names: 22 +} + + + /// Rejected: + /// public static unsafe void ClearFfr(); // svsetffr + /// public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1ub_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1ub_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1ub_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1ub_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses, long index); // svldff1_gather[_u32base]_index_f32 + /// public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses, long index); // svldff1_gather[_u32base]_index_s32 + /// public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses, long index); // svldff1_gather[_u32base]_index_u32 + /// public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses, long index); // svldff1_gather[_u64base]_index_f64 + /// public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses, long index); // svldff1_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses, long index); // svldff1_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1sh_gather[_u32base]_index_s32 + /// public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1sh_gather[_u32base]_index_u32 + /// public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1sh_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1sh_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1sh_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1sh_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1sh_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1sh_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1sw_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector 
addresses, long index); // svldff1sw_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1sw_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1sw_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1sw_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1sw_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1sw_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1sw_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1sb_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1sb_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1sb_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1sb_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1uh_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1uh_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1uh_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1uh_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1uh_gather[_u32base]_index_s32 + /// public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1uh_gather[_u32base]_index_u32 + /// public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1uh_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1uh_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1uw_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1uw_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, Vector addresses, 
long offset); // svldff1uw_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1uw_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1uw_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1uw_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1uw_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses, long index); // svldff1uw_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1_gather[_u32base]_offset_f32 + /// public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1_gather[_u64base]_offset_f64 + /// public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, Vector addresses, long offset); // svldff1_gather[_u64base]_offset_u64 + /// public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, long vnum); // svldff1ub_vnum_s16 + /// public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, long vnum); // svldff1ub_vnum_s32 + /// public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, long vnum); // svldff1ub_vnum_s64 + /// public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, long vnum); // svldff1ub_vnum_u16 + /// public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, long vnum); // svldff1ub_vnum_u32 + /// public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, long vnum); // svldff1ub_vnum_u64 + /// public static unsafe Vector LoadVectorFirstFaulting(Vector mask, float* address, long vnum); // svldff1_vnum[_f32] + /// public static unsafe Vector LoadVectorFirstFaulting(Vector mask, double* address, long vnum); // svldff1_vnum[_f64] + /// public static unsafe Vector LoadVectorFirstFaulting(Vector mask, sbyte* address, long vnum); // svldff1_vnum[_s8] + /// public static unsafe Vector LoadVectorFirstFaulting(Vector mask, short* address, long vnum); // svldff1_vnum[_s16] + /// public static unsafe Vector LoadVectorFirstFaulting(Vector mask, int* address, long vnum); // svldff1_vnum[_s32] + /// public static unsafe Vector LoadVectorFirstFaulting(Vector mask, long* address, long vnum); // svldff1_vnum[_s64] + /// public static unsafe Vector LoadVectorFirstFaulting(Vector mask, byte* address, long vnum); // svldff1_vnum[_u8] + /// public static unsafe Vector LoadVectorFirstFaulting(Vector mask, ushort* address, 
long vnum); // svldff1_vnum[_u16] + /// public static unsafe Vector LoadVectorFirstFaulting(Vector mask, uint* address, long vnum); // svldff1_vnum[_u32] + /// public static unsafe Vector LoadVectorFirstFaulting(Vector mask, ulong* address, long vnum); // svldff1_vnum[_u64] + /// public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(Vector mask, short* address, long vnum); // svldff1sh_vnum_s32 + /// public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(Vector mask, short* address, long vnum); // svldff1sh_vnum_s64 + /// public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(Vector mask, short* address, long vnum); // svldff1sh_vnum_u32 + /// public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(Vector mask, short* address, long vnum); // svldff1sh_vnum_u64 + /// public static unsafe Vector LoadVectorInt32SignExtendFirstFaulting(Vector mask, int* address, long vnum); // svldff1sw_vnum_s64 + /// public static unsafe Vector LoadVectorInt32SignExtendFirstFaulting(Vector mask, int* address, long vnum); // svldff1sw_vnum_u64 + /// public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, long vnum); // svldff1sb_vnum_s16 + /// public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, long vnum); // svldff1sb_vnum_s32 + /// public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, long vnum); // svldff1sb_vnum_s64 + /// public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, long vnum); // svldff1sb_vnum_u16 + /// public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, long vnum); // svldff1sb_vnum_u32 + /// public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, long vnum); // svldff1sb_vnum_u64 + /// public static unsafe Vector LoadVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, long vnum); // svldff1uh_vnum_s32 + /// public static unsafe Vector LoadVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, long vnum); // svldff1uh_vnum_s64 + /// public static unsafe Vector LoadVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, long vnum); // svldff1uh_vnum_u32 + /// public static unsafe Vector LoadVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, long vnum); // svldff1uh_vnum_u64 + /// public static unsafe Vector LoadVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, long vnum); // svldff1uw_vnum_s64 + /// public static unsafe Vector LoadVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, long vnum); // svldff1uw_vnum_u64 + /// Total Rejected: 87 + + /// Total ACLE covered across API: 279 + diff --git a/sve_api/out_api/apiraw_FEAT_SVE__fp.cs b/sve_api/out_api/apiraw_FEAT_SVE__fp.cs new file mode 100644 index 0000000000000..6d8176d3eea10 --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE__fp.cs @@ -0,0 +1,432 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: fp +{ + + /// T: float, double + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); // FCADD // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector AddSequentialAcross(Vector initial, Vector value); // FADDA // predicated + + /// T: [double, float], [double, int], [double, long], [double, uint], [double, 
ulong]
+ public static unsafe Vector ConvertToDouble(Vector value); // FCVT or SCVTF or UCVTF // predicated, MOVPRFX
+
+ /// T: [int, float], [int, double]
+ public static unsafe Vector ConvertToInt32(Vector value); // FCVTZS // predicated, MOVPRFX
+
+ /// T: [long, float], [long, double]
+ public static unsafe Vector ConvertToInt64(Vector value); // FCVTZS // predicated, MOVPRFX
+
+ /// T: [float, double], [float, int], [float, long], [float, uint], [float, ulong]
+ public static unsafe Vector ConvertToSingle(Vector value); // FCVT or SCVTF or UCVTF // predicated, MOVPRFX
+
+ /// T: [uint, float], [uint, double]
+ public static unsafe Vector ConvertToUInt32(Vector value); // FCVTZU // predicated, MOVPRFX
+
+ /// T: [ulong, float], [ulong, double]
+ public static unsafe Vector ConvertToUInt64(Vector value); // FCVTZU // predicated, MOVPRFX
+
+ /// T: [float, uint], [double, ulong]
+ public static unsafe Vector FloatingPointExponentialAccelerator(Vector value); // FEXPA
+
+ /// T: float, double
+ public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); // FCMLA // predicated, MOVPRFX
+
+ public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation); // FCMLA // MOVPRFX
+
+ /// T: float, double
+ public static unsafe Vector ReciprocalEstimate(Vector value); // FRECPE
+
+ /// T: float, double
+ public static unsafe Vector ReciprocalExponent(Vector value); // FRECPX // predicated, MOVPRFX
+
+ /// T: float, double
+ public static unsafe Vector ReciprocalSqrtEstimate(Vector value); // FRSQRTE
+
+ /// T: float, double
+ public static unsafe Vector ReciprocalSqrtStep(Vector left, Vector right); // FRSQRTS
+
+ /// T: float, double
+ public static unsafe Vector ReciprocalStep(Vector left, Vector right); // FRECPS
+
+ /// T: float, double
+ public static unsafe Vector RoundAwayFromZero(Vector value); // FRINTA // predicated, MOVPRFX
+
+ /// T: float, double
+ public static unsafe Vector RoundToNearest(Vector value); // FRINTN // predicated, MOVPRFX
+
+ /// T: float, double
+ public static unsafe Vector RoundToNegativeInfinity(Vector value); // FRINTM // predicated, MOVPRFX
+
+ /// T: float, double
+ public static unsafe Vector RoundToPositiveInfinity(Vector value); // FRINTP // predicated, MOVPRFX
+
+ /// T: float, double
+ public static unsafe Vector RoundToZero(Vector value); // FRINTZ // predicated, MOVPRFX
+
+ /// T: [float, int], [double, long]
+ public static unsafe Vector Scale(Vector left, Vector right); // FSCALE // predicated, MOVPRFX
+
+ /// T: float, double
+ public static unsafe Vector Sqrt(Vector value); // FSQRT // predicated, MOVPRFX
+
+ /// T: float, double
+ public static unsafe Vector TrigonometricMultiplyAddCoefficient(Vector left, Vector right, [ConstantExpected] byte control); // FTMAD // MOVPRFX
+
+ /// T: [float, uint], [double, ulong]
+ public static unsafe Vector TrigonometricSelectCoefficient(Vector value, Vector selector); // FTSSEL
+
+ /// T: [float, uint], [double, ulong]
+ public static unsafe Vector TrigonometricStartingValue(Vector value, Vector sign); // FTSMUL
+
+ /// total method signatures: 26
+
+}
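// Illustrative sketch, not generated output: FCADD/FCMLA treat each pair of
// adjacent lanes as one complex number (re, im), and the rotation constant
// selects the quadrant, so two accumulation steps compose a full complex
// multiply-accumulate. The element types and the exact encoding of the
// rotation argument (degrees are shown) are assumptions here.
static Vector<float> ComplexMultiplyAdd(Vector<float> acc, Vector<float> a, Vector<float> b)
{
    acc = Sve.MultiplyAddRotateComplex(acc, a, b, 0);    // re += a.re*b.re; im += a.re*b.im
    acc = Sve.MultiplyAddRotateComplex(acc, a, b, 90);   // re -= a.im*b.im; im += a.im*b.re
    return acc;                                          // acc + a*b, lane-pair-wise
}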
Ztied1.S, Zop2.S, #imm_rotation" or "MOVPRFX Zresult, Zop1; FCADD Zresult.S, Pg/M, Zresult.S, Zop2.S, #imm_rotation" + /// svfloat32_t svcadd[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, uint64_t imm_rotation) : "FCADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S, #imm_rotation" or "MOVPRFX Zresult, Zop1; FCADD Zresult.S, Pg/M, Zresult.S, Zop2.S, #imm_rotation" + /// svfloat32_t svcadd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, uint64_t imm_rotation) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FCADD Zresult.S, Pg/M, Zresult.S, Zop2.S, #imm_rotation" + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svfloat64_t svcadd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation) : "FCADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D, #imm_rotation" or "MOVPRFX Zresult, Zop1; FCADD Zresult.D, Pg/M, Zresult.D, Zop2.D, #imm_rotation" + /// svfloat64_t svcadd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation) : "FCADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D, #imm_rotation" or "MOVPRFX Zresult, Zop1; FCADD Zresult.D, Pg/M, Zresult.D, Zop2.D, #imm_rotation" + /// svfloat64_t svcadd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FCADD Zresult.D, Pg/M, Zresult.D, Zop2.D, #imm_rotation" + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); + + + /// AddSequentialAcross : Add reduction (strictly-ordered) + + /// float32_t svadda[_f32](svbool_t pg, float32_t initial, svfloat32_t op) : "FADDA Stied, Pg, Stied, Zop.S" + public static unsafe Vector AddSequentialAcross(Vector initial, Vector value); + + /// float64_t svadda[_f64](svbool_t pg, float64_t initial, svfloat64_t op) : "FADDA Dtied, Pg, Dtied, Zop.D" + public static unsafe Vector AddSequentialAcross(Vector initial, Vector value); + + + /// ConvertToDouble : Floating-point convert + + /// svfloat64_t svcvt_f64[_f32]_m(svfloat64_t inactive, svbool_t pg, svfloat32_t op) : "FCVT Ztied.D, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FCVT Zresult.D, Pg/M, Zop.S" + /// svfloat64_t svcvt_f64[_f32]_x(svbool_t pg, svfloat32_t op) : "FCVT Ztied.D, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FCVT Zresult.D, Pg/M, Zop.S" + /// svfloat64_t svcvt_f64[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVT Zresult.D, Pg/M, Zop.S" + public static unsafe Vector ConvertToDouble(Vector value); + + /// svfloat64_t svcvt_f64[_s32]_m(svfloat64_t inactive, svbool_t pg, svint32_t op) : "SCVTF Ztied.D, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; SCVTF Zresult.D, Pg/M, Zop.S" + /// svfloat64_t svcvt_f64[_s32]_x(svbool_t pg, svint32_t op) : "SCVTF Ztied.D, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; SCVTF Zresult.D, Pg/M, Zop.S" + /// svfloat64_t svcvt_f64[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; SCVTF Zresult.D, Pg/M, Zop.S" + public static unsafe Vector ConvertToDouble(Vector value); + + /// svfloat64_t svcvt_f64[_s64]_m(svfloat64_t inactive, svbool_t pg, svint64_t op) : "SCVTF Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; SCVTF Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svcvt_f64[_s64]_x(svbool_t pg, svint64_t op) : "SCVTF Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; SCVTF Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svcvt_f64[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; SCVTF Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ConvertToDouble(Vector value); + + /// svfloat64_t 
svcvt_f64[_u32]_m(svfloat64_t inactive, svbool_t pg, svuint32_t op) : "UCVTF Ztied.D, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; UCVTF Zresult.D, Pg/M, Zop.S" + /// svfloat64_t svcvt_f64[_u32]_x(svbool_t pg, svuint32_t op) : "UCVTF Ztied.D, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; UCVTF Zresult.D, Pg/M, Zop.S" + /// svfloat64_t svcvt_f64[_u32]_z(svbool_t pg, svuint32_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; UCVTF Zresult.D, Pg/M, Zop.S" + public static unsafe Vector ConvertToDouble(Vector value); + + /// svfloat64_t svcvt_f64[_u64]_m(svfloat64_t inactive, svbool_t pg, svuint64_t op) : "UCVTF Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; UCVTF Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svcvt_f64[_u64]_x(svbool_t pg, svuint64_t op) : "UCVTF Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; UCVTF Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svcvt_f64[_u64]_z(svbool_t pg, svuint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; UCVTF Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ConvertToDouble(Vector value); + + + /// ConvertToInt32 : Floating-point convert + + /// svint32_t svcvt_s32[_f32]_m(svint32_t inactive, svbool_t pg, svfloat32_t op) : "FCVTZS Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FCVTZS Zresult.S, Pg/M, Zop.S" + /// svint32_t svcvt_s32[_f32]_x(svbool_t pg, svfloat32_t op) : "FCVTZS Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FCVTZS Zresult.S, Pg/M, Zop.S" + /// svint32_t svcvt_s32[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FCVTZS Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ConvertToInt32(Vector value); + + /// svint32_t svcvt_s32[_f64]_m(svint32_t inactive, svbool_t pg, svfloat64_t op) : "FCVTZS Ztied.S, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FCVTZS Zresult.S, Pg/M, Zop.D" + /// svint32_t svcvt_s32[_f64]_x(svbool_t pg, svfloat64_t op) : "FCVTZS Ztied.S, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FCVTZS Zresult.S, Pg/M, Zop.D" + /// svint32_t svcvt_s32[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZS Zresult.S, Pg/M, Zop.D" + public static unsafe Vector ConvertToInt32(Vector value); + + + /// ConvertToInt64 : Floating-point convert + + /// svint64_t svcvt_s64[_f32]_m(svint64_t inactive, svbool_t pg, svfloat32_t op) : "FCVTZS Ztied.D, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FCVTZS Zresult.D, Pg/M, Zop.S" + /// svint64_t svcvt_s64[_f32]_x(svbool_t pg, svfloat32_t op) : "FCVTZS Ztied.D, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FCVTZS Zresult.D, Pg/M, Zop.S" + /// svint64_t svcvt_s64[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZS Zresult.D, Pg/M, Zop.S" + public static unsafe Vector ConvertToInt64(Vector value); + + /// svint64_t svcvt_s64[_f64]_m(svint64_t inactive, svbool_t pg, svfloat64_t op) : "FCVTZS Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FCVTZS Zresult.D, Pg/M, Zop.D" + /// svint64_t svcvt_s64[_f64]_x(svbool_t pg, svfloat64_t op) : "FCVTZS Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FCVTZS Zresult.D, Pg/M, Zop.D" + /// svint64_t svcvt_s64[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZS Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ConvertToInt64(Vector value); + + + /// ConvertToSingle : Floating-point convert + + /// svfloat32_t svcvt_f32[_f64]_m(svfloat32_t inactive, svbool_t pg, svfloat64_t op) : "FCVT Ztied.S, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FCVT Zresult.S, Pg/M, Zop.D" + /// svfloat32_t svcvt_f32[_f64]_x(svbool_t pg, svfloat64_t op) : "FCVT Ztied.S, Pg/M, Ztied.D" or 
"MOVPRFX Zresult, Zop; FCVT Zresult.S, Pg/M, Zop.D" + /// svfloat32_t svcvt_f32[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVT Zresult.S, Pg/M, Zop.D" + public static unsafe Vector ConvertToSingle(Vector value); + + /// svfloat32_t svcvt_f32[_s32]_m(svfloat32_t inactive, svbool_t pg, svint32_t op) : "SCVTF Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; SCVTF Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svcvt_f32[_s32]_x(svbool_t pg, svint32_t op) : "SCVTF Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; SCVTF Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svcvt_f32[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; SCVTF Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ConvertToSingle(Vector value); + + /// svfloat32_t svcvt_f32[_s64]_m(svfloat32_t inactive, svbool_t pg, svint64_t op) : "SCVTF Ztied.S, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; SCVTF Zresult.S, Pg/M, Zop.D" + /// svfloat32_t svcvt_f32[_s64]_x(svbool_t pg, svint64_t op) : "SCVTF Ztied.S, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; SCVTF Zresult.S, Pg/M, Zop.D" + /// svfloat32_t svcvt_f32[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; SCVTF Zresult.S, Pg/M, Zop.D" + public static unsafe Vector ConvertToSingle(Vector value); + + /// svfloat32_t svcvt_f32[_u32]_m(svfloat32_t inactive, svbool_t pg, svuint32_t op) : "UCVTF Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; UCVTF Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svcvt_f32[_u32]_x(svbool_t pg, svuint32_t op) : "UCVTF Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; UCVTF Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svcvt_f32[_u32]_z(svbool_t pg, svuint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; UCVTF Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ConvertToSingle(Vector value); + + /// svfloat32_t svcvt_f32[_u64]_m(svfloat32_t inactive, svbool_t pg, svuint64_t op) : "UCVTF Ztied.S, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; UCVTF Zresult.S, Pg/M, Zop.D" + /// svfloat32_t svcvt_f32[_u64]_x(svbool_t pg, svuint64_t op) : "UCVTF Ztied.S, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; UCVTF Zresult.S, Pg/M, Zop.D" + /// svfloat32_t svcvt_f32[_u64]_z(svbool_t pg, svuint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; UCVTF Zresult.S, Pg/M, Zop.D" + public static unsafe Vector ConvertToSingle(Vector value); + + + /// ConvertToUInt32 : Floating-point convert + + /// svuint32_t svcvt_u32[_f32]_m(svuint32_t inactive, svbool_t pg, svfloat32_t op) : "FCVTZU Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FCVTZU Zresult.S, Pg/M, Zop.S" + /// svuint32_t svcvt_u32[_f32]_x(svbool_t pg, svfloat32_t op) : "FCVTZU Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FCVTZU Zresult.S, Pg/M, Zop.S" + /// svuint32_t svcvt_u32[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FCVTZU Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ConvertToUInt32(Vector value); + + /// svuint32_t svcvt_u32[_f64]_m(svuint32_t inactive, svbool_t pg, svfloat64_t op) : "FCVTZU Ztied.S, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FCVTZU Zresult.S, Pg/M, Zop.D" + /// svuint32_t svcvt_u32[_f64]_x(svbool_t pg, svfloat64_t op) : "FCVTZU Ztied.S, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FCVTZU Zresult.S, Pg/M, Zop.D" + /// svuint32_t svcvt_u32[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZU Zresult.S, Pg/M, Zop.D" + public static unsafe Vector ConvertToUInt32(Vector value); + + + /// ConvertToUInt64 : Floating-point convert + + /// svuint64_t svcvt_u64[_f32]_m(svuint64_t inactive, 
svbool_t pg, svfloat32_t op) : "FCVTZU Ztied.D, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FCVTZU Zresult.D, Pg/M, Zop.S" + /// svuint64_t svcvt_u64[_f32]_x(svbool_t pg, svfloat32_t op) : "FCVTZU Ztied.D, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FCVTZU Zresult.D, Pg/M, Zop.S" + /// svuint64_t svcvt_u64[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZU Zresult.D, Pg/M, Zop.S" + public static unsafe Vector ConvertToUInt64(Vector value); + + /// svuint64_t svcvt_u64[_f64]_m(svuint64_t inactive, svbool_t pg, svfloat64_t op) : "FCVTZU Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FCVTZU Zresult.D, Pg/M, Zop.D" + /// svuint64_t svcvt_u64[_f64]_x(svbool_t pg, svfloat64_t op) : "FCVTZU Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FCVTZU Zresult.D, Pg/M, Zop.D" + /// svuint64_t svcvt_u64[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZU Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ConvertToUInt64(Vector value); + + + /// FloatingPointExponentialAccelerator : Floating-point exponential accelerator + + /// svfloat32_t svexpa[_f32](svuint32_t op) : "FEXPA Zresult.S, Zop.S" + public static unsafe Vector FloatingPointExponentialAccelerator(Vector value); + + /// svfloat64_t svexpa[_f64](svuint64_t op) : "FEXPA Zresult.D, Zop.D" + public static unsafe Vector FloatingPointExponentialAccelerator(Vector value); + + + /// MultiplyAddRotateComplex : Complex multiply-add with rotate + + /// svfloat32_t svcmla[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation) : "FCMLA Ztied1.S, Pg/M, Zop2.S, Zop3.S, #imm_rotation" or "MOVPRFX Zresult, Zop1; FCMLA Zresult.S, Pg/M, Zop2.S, Zop3.S, #imm_rotation" + /// svfloat32_t svcmla[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation) : "FCMLA Ztied1.S, Pg/M, Zop2.S, Zop3.S, #imm_rotation" or "MOVPRFX Zresult, Zop1; FCMLA Zresult.S, Pg/M, Zop2.S, Zop3.S, #imm_rotation" + /// svfloat32_t svcmla[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FCMLA Zresult.S, Pg/M, Zop2.S, Zop3.S, #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); + + /// svfloat64_t svcmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation) : "FCMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D, #imm_rotation" or "MOVPRFX Zresult, Zop1; FCMLA Zresult.D, Pg/M, Zop2.D, Zop3.D, #imm_rotation" + /// svfloat64_t svcmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation) : "FCMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D, #imm_rotation" or "MOVPRFX Zresult, Zop1; FCMLA Zresult.D, Pg/M, Zop2.D, Zop3.D, #imm_rotation" + /// svfloat64_t svcmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FCMLA Zresult.D, Pg/M, Zop2.D, Zop3.D, #imm_rotation" + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); + + + /// MultiplyAddRotateComplexBySelectedScalar : Complex multiply-add with rotate + + /// svfloat32_t svcmla_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index, uint64_t imm_rotation) : "FCMLA Ztied1.S, Zop2.S, Zop3.S[imm_index], #imm_rotation" or "MOVPRFX Zresult, Zop1; FCMLA Zresult.S, Zop2.S, Zop3.S[imm_index], #imm_rotation" + 
public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation); + + + /// ReciprocalEstimate : Reciprocal estimate + + /// svfloat32_t svrecpe[_f32](svfloat32_t op) : "FRECPE Zresult.S, Zop.S" + public static unsafe Vector ReciprocalEstimate(Vector value); + + /// svfloat64_t svrecpe[_f64](svfloat64_t op) : "FRECPE Zresult.D, Zop.D" + public static unsafe Vector ReciprocalEstimate(Vector value); + + + /// ReciprocalExponent : Reciprocal exponent + + /// svfloat32_t svrecpx[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) : "FRECPX Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FRECPX Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svrecpx[_f32]_x(svbool_t pg, svfloat32_t op) : "FRECPX Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FRECPX Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svrecpx[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FRECPX Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ReciprocalExponent(Vector value); + + /// svfloat64_t svrecpx[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) : "FRECPX Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FRECPX Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svrecpx[_f64]_x(svbool_t pg, svfloat64_t op) : "FRECPX Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FRECPX Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svrecpx[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FRECPX Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ReciprocalExponent(Vector value); + + + /// ReciprocalSqrtEstimate : Reciprocal square root estimate + + /// svfloat32_t svrsqrte[_f32](svfloat32_t op) : "FRSQRTE Zresult.S, Zop.S" + public static unsafe Vector ReciprocalSqrtEstimate(Vector value); + + /// svfloat64_t svrsqrte[_f64](svfloat64_t op) : "FRSQRTE Zresult.D, Zop.D" + public static unsafe Vector ReciprocalSqrtEstimate(Vector value); + + + /// ReciprocalSqrtStep : Reciprocal square root step + + /// svfloat32_t svrsqrts[_f32](svfloat32_t op1, svfloat32_t op2) : "FRSQRTS Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector ReciprocalSqrtStep(Vector left, Vector right); + + /// svfloat64_t svrsqrts[_f64](svfloat64_t op1, svfloat64_t op2) : "FRSQRTS Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector ReciprocalSqrtStep(Vector left, Vector right); + + + /// ReciprocalStep : Reciprocal step + + /// svfloat32_t svrecps[_f32](svfloat32_t op1, svfloat32_t op2) : "FRECPS Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector ReciprocalStep(Vector left, Vector right); + + /// svfloat64_t svrecps[_f64](svfloat64_t op1, svfloat64_t op2) : "FRECPS Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector ReciprocalStep(Vector left, Vector right); + + + /// RoundAwayFromZero : Round to nearest, ties away from zero + + /// svfloat32_t svrinta[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) : "FRINTA Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FRINTA Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svrinta[_f32]_x(svbool_t pg, svfloat32_t op) : "FRINTA Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FRINTA Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svrinta[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FRINTA Zresult.S, Pg/M, Zop.S" + public static unsafe Vector RoundAwayFromZero(Vector value); + + /// svfloat64_t svrinta[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) : "FRINTA Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, 
Zinactive; FRINTA Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svrinta[_f64]_x(svbool_t pg, svfloat64_t op) : "FRINTA Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FRINTA Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svrinta[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FRINTA Zresult.D, Pg/M, Zop.D" + public static unsafe Vector RoundAwayFromZero(Vector value); + + + /// RoundToNearest : Round to nearest, ties to even + + /// svfloat32_t svrintn[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) : "FRINTN Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FRINTN Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svrintn[_f32]_x(svbool_t pg, svfloat32_t op) : "FRINTN Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FRINTN Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svrintn[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FRINTN Zresult.S, Pg/M, Zop.S" + public static unsafe Vector RoundToNearest(Vector value); + + /// svfloat64_t svrintn[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) : "FRINTN Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FRINTN Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svrintn[_f64]_x(svbool_t pg, svfloat64_t op) : "FRINTN Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FRINTN Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svrintn[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FRINTN Zresult.D, Pg/M, Zop.D" + public static unsafe Vector RoundToNearest(Vector value); + + + /// RoundToNegativeInfinity : Round towards -∞ + + /// svfloat32_t svrintm[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) : "FRINTM Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FRINTM Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svrintm[_f32]_x(svbool_t pg, svfloat32_t op) : "FRINTM Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FRINTM Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svrintm[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FRINTM Zresult.S, Pg/M, Zop.S" + public static unsafe Vector RoundToNegativeInfinity(Vector value); + + /// svfloat64_t svrintm[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) : "FRINTM Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FRINTM Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svrintm[_f64]_x(svbool_t pg, svfloat64_t op) : "FRINTM Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FRINTM Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svrintm[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FRINTM Zresult.D, Pg/M, Zop.D" + public static unsafe Vector RoundToNegativeInfinity(Vector value); + + + /// RoundToPositiveInfinity : Round towards +∞ + + /// svfloat32_t svrintp[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) : "FRINTP Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FRINTP Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svrintp[_f32]_x(svbool_t pg, svfloat32_t op) : "FRINTP Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FRINTP Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svrintp[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FRINTP Zresult.S, Pg/M, Zop.S" + public static unsafe Vector RoundToPositiveInfinity(Vector value); + + /// svfloat64_t svrintp[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) : "FRINTP Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FRINTP Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svrintp[_f64]_x(svbool_t pg, svfloat64_t op) : "FRINTP Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FRINTP Zresult.D, Pg/M, Zop.D" + /// 
svfloat64_t svrintp[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FRINTP Zresult.D, Pg/M, Zop.D"
+ public static unsafe Vector RoundToPositiveInfinity(Vector value);
+
+
+ /// RoundToZero : Round towards zero
+
+ /// svfloat32_t svrintz[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) : "FRINTZ Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FRINTZ Zresult.S, Pg/M, Zop.S"
+ /// svfloat32_t svrintz[_f32]_x(svbool_t pg, svfloat32_t op) : "FRINTZ Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FRINTZ Zresult.S, Pg/M, Zop.S"
+ /// svfloat32_t svrintz[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FRINTZ Zresult.S, Pg/M, Zop.S"
+ public static unsafe Vector RoundToZero(Vector value);
+
+ /// svfloat64_t svrintz[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) : "FRINTZ Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FRINTZ Zresult.D, Pg/M, Zop.D"
+ /// svfloat64_t svrintz[_f64]_x(svbool_t pg, svfloat64_t op) : "FRINTZ Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FRINTZ Zresult.D, Pg/M, Zop.D"
+ /// svfloat64_t svrintz[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FRINTZ Zresult.D, Pg/M, Zop.D"
+ public static unsafe Vector RoundToZero(Vector value);
+
+
+ /// Scale : Adjust exponent
+
+ /// svfloat32_t svscale[_f32]_m(svbool_t pg, svfloat32_t op1, svint32_t op2) : "FSCALE Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FSCALE Zresult.S, Pg/M, Zresult.S, Zop2.S"
+ /// svfloat32_t svscale[_f32]_x(svbool_t pg, svfloat32_t op1, svint32_t op2) : "FSCALE Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FSCALE Zresult.S, Pg/M, Zresult.S, Zop2.S"
+ /// svfloat32_t svscale[_f32]_z(svbool_t pg, svfloat32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FSCALE Zresult.S, Pg/M, Zresult.S, Zop2.S"
+ public static unsafe Vector Scale(Vector left, Vector right);
+
+ /// svfloat64_t svscale[_f64]_m(svbool_t pg, svfloat64_t op1, svint64_t op2) : "FSCALE Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FSCALE Zresult.D, Pg/M, Zresult.D, Zop2.D"
+ /// svfloat64_t svscale[_f64]_x(svbool_t pg, svfloat64_t op1, svint64_t op2) : "FSCALE Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FSCALE Zresult.D, Pg/M, Zresult.D, Zop2.D"
+ /// svfloat64_t svscale[_f64]_z(svbool_t pg, svfloat64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FSCALE Zresult.D, Pg/M, Zresult.D, Zop2.D"
+ public static unsafe Vector Scale(Vector left, Vector right);
+
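// Illustrative sketch, not generated output: Scale computes left * 2^right per
// lane by adjusting the exponent field directly (FSCALE), i.e. a vectorised
// ldexp. The element types are assumptions here.
static Vector<float> Ldexp(Vector<float> x, Vector<int> n) => Sve.Scale(x, n);   // y[i] == x[i] * 2^n[i]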
+
+ /// Sqrt : Square root
+
+ /// svfloat32_t svsqrt[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) : "FSQRT Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FSQRT Zresult.S, Pg/M, Zop.S"
+ /// svfloat32_t svsqrt[_f32]_x(svbool_t pg, svfloat32_t op) : "FSQRT Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FSQRT Zresult.S, Pg/M, Zop.S"
+ /// svfloat32_t svsqrt[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FSQRT Zresult.S, Pg/M, Zop.S"
+ public static unsafe Vector Sqrt(Vector value);
+
+ /// svfloat64_t svsqrt[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) : "FSQRT Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FSQRT Zresult.D, Pg/M, Zop.D"
+ /// svfloat64_t svsqrt[_f64]_x(svbool_t pg, svfloat64_t op) : "FSQRT Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FSQRT Zresult.D, Pg/M, Zop.D"
+ /// svfloat64_t svsqrt[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FSQRT Zresult.D, Pg/M, Zop.D"
+ public static unsafe Vector Sqrt(Vector value);
+
+
+ /// TrigonometricMultiplyAddCoefficient : Trigonometric multiply-add coefficient
+
+ /// svfloat32_t svtmad[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm3) : "FTMAD Ztied1.S, Ztied1.S, Zop2.S, #imm3" or "MOVPRFX Zresult, Zop1; FTMAD Zresult.S, Zresult.S, Zop2.S, #imm3"
+ public static unsafe Vector TrigonometricMultiplyAddCoefficient(Vector left, Vector right, [ConstantExpected] byte control);
+
+ /// svfloat64_t svtmad[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm3) : "FTMAD Ztied1.D, Ztied1.D, Zop2.D, #imm3" or "MOVPRFX Zresult, Zop1; FTMAD Zresult.D, Zresult.D, Zop2.D, #imm3"
+ public static unsafe Vector TrigonometricMultiplyAddCoefficient(Vector left, Vector right, [ConstantExpected] byte control);
+
+
+ /// TrigonometricSelectCoefficient : Trigonometric select coefficient
+
+ /// svfloat32_t svtssel[_f32](svfloat32_t op1, svuint32_t op2) : "FTSSEL Zresult.S, Zop1.S, Zop2.S"
+ public static unsafe Vector TrigonometricSelectCoefficient(Vector value, Vector selector);
+
+ /// svfloat64_t svtssel[_f64](svfloat64_t op1, svuint64_t op2) : "FTSSEL Zresult.D, Zop1.D, Zop2.D"
+ public static unsafe Vector TrigonometricSelectCoefficient(Vector value, Vector selector);
+
+
+ /// TrigonometricStartingValue : Trigonometric starting value
+
+ /// svfloat32_t svtsmul[_f32](svfloat32_t op1, svuint32_t op2) : "FTSMUL Zresult.S, Zop1.S, Zop2.S"
+ public static unsafe Vector TrigonometricStartingValue(Vector value, Vector sign);
+
+ /// svfloat64_t svtsmul[_f64](svfloat64_t op1, svuint64_t op2) : "FTSMUL Zresult.D, Zop1.D, Zop2.D"
+ public static unsafe Vector TrigonometricStartingValue(Vector value, Vector sign);
+
+
+ /// total method signatures: 57
+ /// total method names: 28
+}
+
+
+ /// Rejected:
+ /// public static unsafe Vector RoundUsingCurrentRoundingModeExact(Vector value); // svrintx[_f32]_m or svrintx[_f32]_x or svrintx[_f32]_z
+ /// public static unsafe Vector RoundUsingCurrentRoundingModeExact(Vector value); // svrintx[_f64]_m or svrintx[_f64]_x or svrintx[_f64]_z
+ /// public static unsafe Vector RoundUsingCurrentRoundingModeInexact(Vector value); // svrinti[_f32]_m or svrinti[_f32]_x or svrinti[_f32]_z
+ /// public static unsafe Vector RoundUsingCurrentRoundingModeInexact(Vector value); // svrinti[_f64]_m or svrinti[_f64]_x or svrinti[_f64]_z
+ /// public static unsafe Vector Scale(Vector left, int right); // svscale[_n_f32]_m or svscale[_n_f32]_x or svscale[_n_f32]_z
+ /// public static unsafe Vector Scale(Vector left, long right); // svscale[_n_f64]_m or svscale[_n_f64]_x or svscale[_n_f64]_z
+ /// Total Rejected: 6
+
+ /// Total ACLE covered across API: 151
+
diff --git a/sve_api/out_api/apiraw_FEAT_SVE__gatherloads.cs b/sve_api/out_api/apiraw_FEAT_SVE__gatherloads.cs
new file mode 100644
index 0000000000000..4935a714fea52
--- /dev/null
+++ b/sve_api/out_api/apiraw_FEAT_SVE__gatherloads.cs
@@ -0,0 +1,784 @@
+namespace System.Runtime.Intrinsics.Arm;
+
+/// VectorT Summary
+public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: gatherloads
+{
+
+ /// T: [ushort, uint], [short, uint], [ushort, ulong], [short, ulong]
+ public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); // PRFH
+
+ /// T: [ushort, int], [short, int], [ushort, uint], [short, uint], [ushort, long], [short, long], [ushort, ulong], [short, ulong]
+ public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType
prefetchType); // PRFH + + /// T: [uint, uint], [int, uint], [uint, ulong], [int, ulong] + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); // PRFW + + /// T: [uint, int], [int, int], [uint, uint], [int, uint], [uint, long], [int, long], [uint, ulong], [int, ulong] + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); // PRFW + + /// T: [ulong, uint], [long, uint], [ulong, ulong], [long, ulong] + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); // PRFD + + /// T: [ulong, int], [long, int], [ulong, uint], [long, uint], [ulong, long], [long, long], [ulong, ulong], [long, ulong] + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); // PRFD + + /// T: [byte, uint], [sbyte, uint], [byte, ulong], [sbyte, ulong] + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); // PRFB + + /// T: [byte, int], [sbyte, int], [byte, uint], [sbyte, uint], [byte, long], [sbyte, long], [byte, ulong], [sbyte, ulong] + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType); // PRFB + + /// T: [float, uint], [int, uint], [uint, uint], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVector(Vector mask, Vector addresses); // LD1W or LD1D + + /// T: [float, int], [int, int], [uint, int], [float, uint], [int, uint], [uint, uint], [double, long], [long, long], [ulong, long], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVector(Vector mask, T* address, Vector indices); // LD1W or LD1D + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector addresses); // LD1B + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices); // LD1B + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses); // LD1SH + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices); // LD1SH + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets); // LD1SH + + /// T: [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses); // LD1SW + + /// T: [long, long], [int, int], [ulong, long], [uint, int], [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices); // LD1SW + + /// T: [long, long], [int, int], [ulong, long], [uint, int], [long, ulong], [int, uint], [ulong, ulong], [uint, uint] + public static unsafe Vector 
GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets); // LD1SW
+
+ /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong]
+ public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector addresses); // LD1SB
+
+ /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong]
+ public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices); // LD1SB
+
+ /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong]
+ public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets); // LD1H
+
+ /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong]
+ public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses); // LD1H
+
+ /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong]
+ public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices); // LD1H
+
+ /// T: [long, long], [int, int], [ulong, long], [uint, int], [long, ulong], [int, uint], [ulong, ulong], [uint, uint]
+ public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets); // LD1W
+
+ /// T: [long, ulong], [int, uint], [ulong, ulong], [uint, uint]
+ public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses); // LD1W
+
+ /// T: [long, long], [int, int], [ulong, long], [uint, int], [long, ulong], [int, uint], [ulong, ulong], [uint, uint]
+ public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices); // LD1W
+
+ /// T: [float, int], [int, int], [uint, int], [float, uint], [int, uint], [uint, uint], [double, long], [long, long], [ulong, long], [double, ulong], [long, ulong], [ulong, ulong]
+ public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, T* address, Vector offsets); // LD1W or LD1D
+
+
+public enum SvePrefetchType
+{
+ LoadL1Temporal = 0,
+ LoadL1NonTemporal = 1,
+ LoadL2Temporal = 2,
+ LoadL2NonTemporal = 3,
+ LoadL3Temporal = 4,
+ LoadL3NonTemporal = 5,
+ StoreL1Temporal = 8,
+ StoreL1NonTemporal = 9,
+ StoreL2Temporal = 10,
+ StoreL2NonTemporal = 11,
+ StoreL3Temporal = 12,
+ StoreL3NonTemporal = 13
+};
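// Illustrative sketch, not generated output: the SvePrefetchType constant
// selects the target cache level and temporal hint of the PRF* instruction.
// A typical pairing warms the cache for a gather issued shortly afterwards;
// the element types are assumptions here.
static unsafe Vector<int> PrefetchedGather(Vector<int> mask, int* basePtr, Vector<int> indices)
{
    Sve.GatherPrefetch32Bit(mask, basePtr, indices, SvePrefetchType.LoadL1Temporal);   // PRFW into L1
    return Sve.GatherVector(mask, basePtr, indices);                                   // LD1W gather
}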
+
+ /// total method signatures: 27
+
+}
+
+
+/// Full API
+public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: gatherloads
+{
+ /// GatherPrefetch16Bit : Prefetch halfwords
+
+ /// void svprfh_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) : "PRFH op, Pg, [Zbases.S, #0]"
+ public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfh_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) : "PRFH op, Pg, [Zbases.S, #0]"
+ public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfh_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) : "PRFH op, Pg, [Zbases.D, #0]"
+ public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfh_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) : "PRFH op, Pg, [Zbases.D, #0]"
+ public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfh_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) : "PRFH op, Pg, [Xbase, Zindices.S, SXTW #1]"
+ public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfh_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) : "PRFH op, Pg, [Xbase, Zindices.S, SXTW #1]"
+ public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfh_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) : "PRFH op, Pg, [Xbase, Zindices.S, UXTW #1]"
+ public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfh_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) : "PRFH op, Pg, [Xbase, Zindices.S, UXTW #1]"
+ public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfh_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) : "PRFH op, Pg, [Xbase, Zindices.D, LSL #1]"
+ public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfh_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) : "PRFH op, Pg, [Xbase, Zindices.D, LSL #1]"
+ public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfh_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) : "PRFH op, Pg, [Xbase, Zindices.D, LSL #1]"
+ public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfh_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) : "PRFH op, Pg, [Xbase, Zindices.D, LSL #1]"
+ public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType);
+
+
+ /// GatherPrefetch32Bit : Prefetch words
+
+ /// void svprfw_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) : "PRFW op, Pg, [Zbases.S, #0]"
+ public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfw_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) : "PRFW op, Pg, [Zbases.S, #0]"
+ public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfw_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) : "PRFW op, Pg, [Zbases.D, #0]"
+ public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfw_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) : "PRFW op, Pg, [Zbases.D, #0]"
+ public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType);
+
+ /// void svprfw_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) : "PRFW op,
Pg, [Xbase, Zindices.S, SXTW #2]" + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfw_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) : "PRFW op, Pg, [Xbase, Zindices.S, SXTW #2]" + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfw_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) : "PRFW op, Pg, [Xbase, Zindices.S, UXTW #2]" + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfw_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) : "PRFW op, Pg, [Xbase, Zindices.S, UXTW #2]" + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfw_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) : "PRFW op, Pg, [Xbase, Zindices.D, LSL #2]" + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfw_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) : "PRFW op, Pg, [Xbase, Zindices.D, LSL #2]" + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfw_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) : "PRFW op, Pg, [Xbase, Zindices.D, LSL #2]" + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfw_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) : "PRFW op, Pg, [Xbase, Zindices.D, LSL #2]" + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + + /// GatherPrefetch64Bit : Prefetch doublewords + + /// void svprfd_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) : "PRFD op, Pg, [Zbases.S, #0]" + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfd_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) : "PRFD op, Pg, [Zbases.S, #0]" + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfd_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) : "PRFD op, Pg, [Zbases.D, #0]" + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfd_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) : "PRFD op, Pg, [Zbases.D, #0]" + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfd_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) : "PRFD op, Pg, [Xbase, Zindices.S, SXTW #3]" + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType 
prefetchType); + + /// void svprfd_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) : "PRFD op, Pg, [Xbase, Zindices.S, SXTW #3]" + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfd_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) : "PRFD op, Pg, [Xbase, Zindices.S, UXTW #3]" + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfd_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) : "PRFD op, Pg, [Xbase, Zindices.S, UXTW #3]" + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfd_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) : "PRFD op, Pg, [Xbase, Zindices.D, LSL #3]" + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfd_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) : "PRFD op, Pg, [Xbase, Zindices.D, LSL #3]" + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfd_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) : "PRFD op, Pg, [Xbase, Zindices.D, LSL #3]" + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfd_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) : "PRFD op, Pg, [Xbase, Zindices.D, LSL #3]" + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + + /// GatherPrefetch8Bit : Prefetch bytes + + /// void svprfb_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) : "PRFB op, Pg, [Zbases.S, #0]" + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfb_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) : "PRFB op, Pg, [Zbases.S, #0]" + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfb_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) : "PRFB op, Pg, [Zbases.D, #0]" + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfb_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) : "PRFB op, Pg, [Zbases.D, #0]" + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfb_gather_[s32]offset(svbool_t pg, const void *base, svint32_t offsets, enum svprfop op) : "PRFB op, Pg, [Xbase, Zoffsets.S, SXTW]" + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfb_gather_[s32]offset(svbool_t pg, const void *base, svint32_t offsets, enum svprfop op) : "PRFB op, Pg, [Xbase, Zoffsets.S, SXTW]" + public static unsafe 
void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfb_gather_[u32]offset(svbool_t pg, const void *base, svuint32_t offsets, enum svprfop op) : "PRFB op, Pg, [Xbase, Zoffsets.S, UXTW]" + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfb_gather_[u32]offset(svbool_t pg, const void *base, svuint32_t offsets, enum svprfop op) : "PRFB op, Pg, [Xbase, Zoffsets.S, UXTW]" + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfb_gather_[s64]offset(svbool_t pg, const void *base, svint64_t offsets, enum svprfop op) : "PRFB op, Pg, [Xbase, Zoffsets.D]" + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfb_gather_[s64]offset(svbool_t pg, const void *base, svint64_t offsets, enum svprfop op) : "PRFB op, Pg, [Xbase, Zoffsets.D]" + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfb_gather_[u64]offset(svbool_t pg, const void *base, svuint64_t offsets, enum svprfop op) : "PRFB op, Pg, [Xbase, Zoffsets.D]" + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType); + + /// void svprfb_gather_[u64]offset(svbool_t pg, const void *base, svuint64_t offsets, enum svprfop op) : "PRFB op, Pg, [Xbase, Zoffsets.D]" + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType); + + + /// GatherVector : Unextended load + + /// svfloat32_t svld1_gather[_u32base]_f32(svbool_t pg, svuint32_t bases) : "LD1W Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVector(Vector mask, Vector addresses); + + /// svint32_t svld1_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LD1W Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVector(Vector mask, Vector addresses); + + /// svuint32_t svld1_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LD1W Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVector(Vector mask, Vector addresses); + + /// svfloat64_t svld1_gather[_u64base]_f64(svbool_t pg, svuint64_t bases) : "LD1D Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVector(Vector mask, Vector addresses); + + /// svint64_t svld1_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LD1D Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVector(Vector mask, Vector addresses); + + /// svuint64_t svld1_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LD1D Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVector(Vector mask, Vector addresses); + + /// svfloat32_t svld1_gather_[s32]index[_f32](svbool_t pg, const float32_t *base, svint32_t indices) : "LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #2]" + public static unsafe Vector GatherVector(Vector mask, float* address, Vector indices); + + /// svint32_t svld1_gather_[s32]index[_s32](svbool_t pg, const int32_t *base, svint32_t indices) : "LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #2]" + public static unsafe Vector GatherVector(Vector mask, int* address, Vector indices); + + /// svuint32_t 
svld1_gather_[s32]index[_u32](svbool_t pg, const uint32_t *base, svint32_t indices) : "LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #2]" + public static unsafe Vector GatherVector(Vector mask, uint* address, Vector indices); + + /// svfloat32_t svld1_gather_[u32]index[_f32](svbool_t pg, const float32_t *base, svuint32_t indices) : "LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2]" + public static unsafe Vector GatherVector(Vector mask, float* address, Vector indices); + + /// svint32_t svld1_gather_[u32]index[_s32](svbool_t pg, const int32_t *base, svuint32_t indices) : "LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2]" + public static unsafe Vector GatherVector(Vector mask, int* address, Vector indices); + + /// svuint32_t svld1_gather_[u32]index[_u32](svbool_t pg, const uint32_t *base, svuint32_t indices) : "LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2]" + public static unsafe Vector GatherVector(Vector mask, uint* address, Vector indices); + + /// svfloat64_t svld1_gather_[s64]index[_f64](svbool_t pg, const float64_t *base, svint64_t indices) : "LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]" + public static unsafe Vector GatherVector(Vector mask, double* address, Vector indices); + + /// svint64_t svld1_gather_[s64]index[_s64](svbool_t pg, const int64_t *base, svint64_t indices) : "LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]" + public static unsafe Vector GatherVector(Vector mask, long* address, Vector indices); + + /// svuint64_t svld1_gather_[s64]index[_u64](svbool_t pg, const uint64_t *base, svint64_t indices) : "LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]" + public static unsafe Vector GatherVector(Vector mask, ulong* address, Vector indices); + + /// svfloat64_t svld1_gather_[u64]index[_f64](svbool_t pg, const float64_t *base, svuint64_t indices) : "LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]" + public static unsafe Vector GatherVector(Vector mask, double* address, Vector indices); + + /// svint64_t svld1_gather_[u64]index[_s64](svbool_t pg, const int64_t *base, svuint64_t indices) : "LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]" + public static unsafe Vector GatherVector(Vector mask, long* address, Vector indices); + + /// svuint64_t svld1_gather_[u64]index[_u64](svbool_t pg, const uint64_t *base, svuint64_t indices) : "LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]" + public static unsafe Vector GatherVector(Vector mask, ulong* address, Vector indices); + + + /// GatherVectorByteZeroExtend : Load 8-bit data and zero-extend + + /// svint32_t svld1ub_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LD1B Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector addresses); + + /// svuint32_t svld1ub_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LD1B Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector addresses); + + /// svint64_t svld1ub_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LD1B Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector addresses); + + /// svuint64_t svld1ub_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LD1B Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector addresses); + + /// svint32_t svld1ub_gather_[s32]offset_s32(svbool_t pg, const uint8_t *base, svint32_t offsets) : "LD1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector 
GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices); + + /// svuint32_t svld1ub_gather_[s32]offset_u32(svbool_t pg, const uint8_t *base, svint32_t offsets) : "LD1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices); + + /// svint32_t svld1ub_gather_[u32]offset_s32(svbool_t pg, const uint8_t *base, svuint32_t offsets) : "LD1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices); + + /// svuint32_t svld1ub_gather_[u32]offset_u32(svbool_t pg, const uint8_t *base, svuint32_t offsets) : "LD1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices); + + /// svint64_t svld1ub_gather_[s64]offset_s64(svbool_t pg, const uint8_t *base, svint64_t offsets) : "LD1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices); + + /// svuint64_t svld1ub_gather_[s64]offset_u64(svbool_t pg, const uint8_t *base, svint64_t offsets) : "LD1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices); + + /// svint64_t svld1ub_gather_[u64]offset_s64(svbool_t pg, const uint8_t *base, svuint64_t offsets) : "LD1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices); + + /// svuint64_t svld1ub_gather_[u64]offset_u64(svbool_t pg, const uint8_t *base, svuint64_t offsets) : "LD1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices); + + + /// GatherVectorInt16SignExtend : Load 16-bit data and sign-extend + + /// svint32_t svld1sh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LD1SH Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses); + + /// svuint32_t svld1sh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LD1SH Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses); + + /// svint64_t svld1sh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LD1SH Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses); + + /// svuint64_t svld1sh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LD1SH Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses); + + /// svint32_t svld1sh_gather_[s32]index_s32(svbool_t pg, const int16_t *base, svint32_t indices) : "LD1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]" + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices); + + /// svuint32_t svld1sh_gather_[s32]index_u32(svbool_t pg, const int16_t *base, svint32_t indices) : "LD1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]" + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices); + + /// svint32_t svld1sh_gather_[u32]index_s32(svbool_t pg, const int16_t *base, svuint32_t indices) : "LD1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]" + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices); + + 
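An illustrative usage sketch for the extending gathers listed in this section (not part of the generated output): the snippet below gathers 16-bit elements and sign-extends them into 32-bit lanes. The concrete Vector<int> instantiation matches the [s32]index_s32 entry above; the Sve.CreateTrueMaskInt32 helper is assumed from elsewhere in the proposal.

// Hypothetical consumer of the proposed API; assumes using System.Numerics and
// using System.Runtime.Intrinsics.Arm, on SVE-capable hardware.
static unsafe Vector<int> GatherInt16SignExtended(short* table, Vector<int> indices)
{
    if (!Sve.IsSupported)
        throw new PlatformNotSupportedException();

    // All-true predicate: every lane participates in the gather.
    Vector<int> mask = Sve.CreateTrueMaskInt32();

    // LD1SH: each active lane loads table[indices[i]] and sign-extends it to 32 bits.
    return Sve.GatherVectorInt16SignExtend(mask, table, indices);
}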
/// svuint32_t svld1sh_gather_[u32]index_u32(svbool_t pg, const int16_t *base, svuint32_t indices) : "LD1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]" + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices); + + /// svint64_t svld1sh_gather_[s64]index_s64(svbool_t pg, const int16_t *base, svint64_t indices) : "LD1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices); + + /// svuint64_t svld1sh_gather_[s64]index_u64(svbool_t pg, const int16_t *base, svint64_t indices) : "LD1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices); + + /// svint64_t svld1sh_gather_[u64]index_s64(svbool_t pg, const int16_t *base, svuint64_t indices) : "LD1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices); + + /// svuint64_t svld1sh_gather_[u64]index_u64(svbool_t pg, const int16_t *base, svuint64_t indices) : "LD1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices); + + + /// GatherVectorInt16WithByteOffsetsSignExtend : Load 16-bit data and sign-extend + + /// svint32_t svld1sh_gather_[s32]offset_s32(svbool_t pg, const int16_t *base, svint32_t offsets) : "LD1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets); + + /// svuint32_t svld1sh_gather_[s32]offset_u32(svbool_t pg, const int16_t *base, svint32_t offsets) : "LD1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets); + + /// svint32_t svld1sh_gather_[u32]offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets) : "LD1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets); + + /// svuint32_t svld1sh_gather_[u32]offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets) : "LD1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets); + + /// svint64_t svld1sh_gather_[s64]offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets) : "LD1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets); + + /// svuint64_t svld1sh_gather_[s64]offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets) : "LD1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets); + + /// svint64_t svld1sh_gather_[u64]offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets) : "LD1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets); + + /// svuint64_t svld1sh_gather_[u64]offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets) : "LD1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, 
Vector offsets); + + + /// GatherVectorInt32SignExtend : Load 32-bit data and sign-extend + + /// svint64_t svld1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LD1SW Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses); + + /// svint64_t svld1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LD1SW Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses); + + /// svuint64_t svld1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LD1SW Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses); + + /// svuint64_t svld1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LD1SW Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses); + + /// svint64_t svld1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices); + + /// svint64_t svld1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices); + + /// svuint64_t svld1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices); + + /// svuint64_t svld1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices); + + /// svint64_t svld1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices); + + /// svint64_t svld1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices); + + /// svuint64_t svld1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices); + + /// svuint64_t svld1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices); + + + /// GatherVectorInt32WithByteOffsetsSignExtend : Load 32-bit data and sign-extend + + /// svint64_t svld1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets); + + /// svint64_t svld1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector 
GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets); + + /// svuint64_t svld1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets); + + /// svuint64_t svld1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets); + + /// svint64_t svld1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets); + + /// svint64_t svld1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets); + + /// svuint64_t svld1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets); + + /// svuint64_t svld1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) : "LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets); + + + /// GatherVectorSByteSignExtend : Load 8-bit data and sign-extend + + /// svint32_t svld1sb_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LD1SB Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector addresses); + + /// svuint32_t svld1sb_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LD1SB Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector addresses); + + /// svint64_t svld1sb_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LD1SB Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector addresses); + + /// svuint64_t svld1sb_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LD1SB Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector addresses); + + /// svint32_t svld1sb_gather_[s32]offset_s32(svbool_t pg, const int8_t *base, svint32_t offsets) : "LD1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices); + + /// svuint32_t svld1sb_gather_[s32]offset_u32(svbool_t pg, const int8_t *base, svint32_t offsets) : "LD1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices); + + /// svint32_t svld1sb_gather_[u32]offset_s32(svbool_t pg, const int8_t *base, svuint32_t offsets) : "LD1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices); + + /// svuint32_t svld1sb_gather_[u32]offset_u32(svbool_t pg, const int8_t *base, svuint32_t offsets) : "LD1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static 
unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices); + + /// svint64_t svld1sb_gather_[s64]offset_s64(svbool_t pg, const int8_t *base, svint64_t offsets) : "LD1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices); + + /// svuint64_t svld1sb_gather_[s64]offset_u64(svbool_t pg, const int8_t *base, svint64_t offsets) : "LD1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices); + + /// svint64_t svld1sb_gather_[u64]offset_s64(svbool_t pg, const int8_t *base, svuint64_t offsets) : "LD1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices); + + /// svuint64_t svld1sb_gather_[u64]offset_u64(svbool_t pg, const int8_t *base, svuint64_t offsets) : "LD1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices); + + + /// GatherVectorUInt16WithByteOffsetsZeroExtend : Load 16-bit data and zero-extend + + /// svint32_t svld1uh_gather_[s32]offset_s32(svbool_t pg, const uint16_t *base, svint32_t offsets) : "LD1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets); + + /// svuint32_t svld1uh_gather_[s32]offset_u32(svbool_t pg, const uint16_t *base, svint32_t offsets) : "LD1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets); + + /// svint32_t svld1uh_gather_[u32]offset_s32(svbool_t pg, const uint16_t *base, svuint32_t offsets) : "LD1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets); + + /// svuint32_t svld1uh_gather_[u32]offset_u32(svbool_t pg, const uint16_t *base, svuint32_t offsets) : "LD1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets); + + /// svint64_t svld1uh_gather_[s64]offset_s64(svbool_t pg, const uint16_t *base, svint64_t offsets) : "LD1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets); + + /// svuint64_t svld1uh_gather_[s64]offset_u64(svbool_t pg, const uint16_t *base, svint64_t offsets) : "LD1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets); + + /// svint64_t svld1uh_gather_[u64]offset_s64(svbool_t pg, const uint16_t *base, svuint64_t offsets) : "LD1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets); + + /// svuint64_t svld1uh_gather_[u64]offset_u64(svbool_t pg, const uint16_t *base, svuint64_t offsets) : "LD1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets); + + + /// GatherVectorUInt16ZeroExtend : Load 16-bit data and zero-extend + + /// svint32_t svld1uh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) : "LD1H Zresult.S, 
Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses); + + /// svuint32_t svld1uh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) : "LD1H Zresult.S, Pg/Z, [Zbases.S, #0]" + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses); + + /// svint64_t svld1uh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LD1H Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses); + + /// svuint64_t svld1uh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LD1H Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses); + + /// svint32_t svld1uh_gather_[s32]index_s32(svbool_t pg, const uint16_t *base, svint32_t indices) : "LD1H Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices); + + /// svuint32_t svld1uh_gather_[s32]index_u32(svbool_t pg, const uint16_t *base, svint32_t indices) : "LD1H Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices); + + /// svint32_t svld1uh_gather_[u32]index_s32(svbool_t pg, const uint16_t *base, svuint32_t indices) : "LD1H Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices); + + /// svuint32_t svld1uh_gather_[u32]index_u32(svbool_t pg, const uint16_t *base, svuint32_t indices) : "LD1H Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices); + + /// svint64_t svld1uh_gather_[s64]index_s64(svbool_t pg, const uint16_t *base, svint64_t indices) : "LD1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices); + + /// svuint64_t svld1uh_gather_[s64]index_u64(svbool_t pg, const uint16_t *base, svint64_t indices) : "LD1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices); + + /// svint64_t svld1uh_gather_[u64]index_s64(svbool_t pg, const uint16_t *base, svuint64_t indices) : "LD1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices); + + /// svuint64_t svld1uh_gather_[u64]index_u64(svbool_t pg, const uint16_t *base, svuint64_t indices) : "LD1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]" + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices); + + + /// GatherVectorUInt32WithByteOffsetsZeroExtend : Load 32-bit data and zero-extend + + /// svint64_t svld1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) : "LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets); + + /// svint64_t svld1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) : "LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets); + + /// svuint64_t svld1uw_gather_[s64]offset_u64(svbool_t pg, const 
uint32_t *base, svint64_t offsets) : "LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets); + + /// svuint64_t svld1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) : "LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets); + + /// svint64_t svld1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) : "LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets); + + /// svint64_t svld1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) : "LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets); + + /// svuint64_t svld1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) : "LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets); + + /// svuint64_t svld1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) : "LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets); + + + /// GatherVectorUInt32ZeroExtend : Load 32-bit data and zero-extend + + /// svint64_t svld1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LD1W Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses); + + /// svint64_t svld1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) : "LD1W Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses); + + /// svuint64_t svld1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LD1W Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses); + + /// svuint64_t svld1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) : "LD1W Zresult.D, Pg/Z, [Zbases.D, #0]" + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses); + + /// svint64_t svld1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) : "LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices); + + /// svint64_t svld1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) : "LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices); + + /// svuint64_t svld1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) : "LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices); + + /// svuint64_t svld1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) : "LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices); + + /// svint64_t 
svld1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) : "LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices); + + /// svint64_t svld1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) : "LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices); + + /// svuint64_t svld1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) : "LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices); + + /// svuint64_t svld1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) : "LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]" + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices); + + + /// GatherVectorWithByteOffsets : Unextended load + + /// svfloat32_t svld1_gather_[s32]offset[_f32](svbool_t pg, const float32_t *base, svint32_t offsets) : "LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, float* address, Vector offsets); + + /// svint32_t svld1_gather_[s32]offset[_s32](svbool_t pg, const int32_t *base, svint32_t offsets) : "LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, int* address, Vector offsets); + + /// svuint32_t svld1_gather_[s32]offset[_u32](svbool_t pg, const uint32_t *base, svint32_t offsets) : "LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]" + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, uint* address, Vector offsets); + + /// svfloat32_t svld1_gather_[u32]offset[_f32](svbool_t pg, const float32_t *base, svuint32_t offsets) : "LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, float* address, Vector offsets); + + /// svint32_t svld1_gather_[u32]offset[_s32](svbool_t pg, const int32_t *base, svuint32_t offsets) : "LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, int* address, Vector offsets); + + /// svuint32_t svld1_gather_[u32]offset[_u32](svbool_t pg, const uint32_t *base, svuint32_t offsets) : "LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]" + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, uint* address, Vector offsets); + + /// svfloat64_t svld1_gather_[s64]offset[_f64](svbool_t pg, const float64_t *base, svint64_t offsets) : "LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, double* address, Vector offsets); + + /// svint64_t svld1_gather_[s64]offset[_s64](svbool_t pg, const int64_t *base, svint64_t offsets) : "LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, long* address, Vector offsets); + + /// svuint64_t svld1_gather_[s64]offset[_u64](svbool_t pg, const uint64_t *base, svint64_t offsets) : "LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, ulong* address, Vector offsets); + + /// svfloat64_t svld1_gather_[u64]offset[_f64](svbool_t pg, const float64_t *base, svuint64_t offsets) : "LD1D Zresult.D, Pg/Z, [Xbase, 
Zoffsets.D]" + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, double* address, Vector offsets); + + /// svint64_t svld1_gather_[u64]offset[_s64](svbool_t pg, const int64_t *base, svuint64_t offsets) : "LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, long* address, Vector offsets); + + /// svuint64_t svld1_gather_[u64]offset[_u64](svbool_t pg, const uint64_t *base, svuint64_t offsets) : "LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D]" + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, ulong* address, Vector offsets); + + + /// total method signatures: 182 + /// total method names: 16 +} + + + /// Rejected: + /// public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, long index, [ConstantExpected] SvePrefetchType prefetchType); // svprfh_gather[_u32base]_index + /// public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, long index, [ConstantExpected] SvePrefetchType prefetchType); // svprfh_gather[_u32base]_index + /// public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, long index, [ConstantExpected] SvePrefetchType prefetchType); // svprfh_gather[_u64base]_index + /// public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, long index, [ConstantExpected] SvePrefetchType prefetchType); // svprfh_gather[_u64base]_index + /// public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, long index, [ConstantExpected] SvePrefetchType prefetchType); // svprfw_gather[_u32base]_index + /// public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, long index, [ConstantExpected] SvePrefetchType prefetchType); // svprfw_gather[_u32base]_index + /// public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, long index, [ConstantExpected] SvePrefetchType prefetchType); // svprfw_gather[_u64base]_index + /// public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, long index, [ConstantExpected] SvePrefetchType prefetchType); // svprfw_gather[_u64base]_index + /// public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, long index, [ConstantExpected] SvePrefetchType prefetchType); // svprfd_gather[_u32base]_index + /// public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, long index, [ConstantExpected] SvePrefetchType prefetchType); // svprfd_gather[_u32base]_index + /// public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, long index, [ConstantExpected] SvePrefetchType prefetchType); // svprfd_gather[_u64base]_index + /// public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, long index, [ConstantExpected] SvePrefetchType prefetchType); // svprfd_gather[_u64base]_index + /// public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, long offset, [ConstantExpected] SvePrefetchType prefetchType); // svprfb_gather[_u32base]_offset + /// public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, long offset, [ConstantExpected] SvePrefetchType prefetchType); // svprfb_gather[_u32base]_offset + /// public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, long offset, [ConstantExpected] SvePrefetchType prefetchType); // svprfb_gather[_u64base]_offset + /// public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, long offset, [ConstantExpected] SvePrefetchType prefetchType); // 
svprfb_gather[_u64base]_offset + /// public static unsafe Vector GatherVector(Vector mask, Vector addresses, long offset); // svld1_gather[_u32base]_offset_f32 + /// public static unsafe Vector GatherVector(Vector mask, Vector addresses, long offset); // svld1_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVector(Vector mask, Vector addresses, long offset); // svld1_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVector(Vector mask, Vector addresses, long offset); // svld1_gather[_u64base]_offset_f64 + /// public static unsafe Vector GatherVector(Vector mask, Vector addresses, long offset); // svld1_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVector(Vector mask, Vector addresses, long offset); // svld1_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVector(Vector mask, Vector addresses, long index); // svld1_gather[_u32base]_index_f32 + /// public static unsafe Vector GatherVector(Vector mask, Vector addresses, long index); // svld1_gather[_u32base]_index_s32 + /// public static unsafe Vector GatherVector(Vector mask, Vector addresses, long index); // svld1_gather[_u32base]_index_u32 + /// public static unsafe Vector GatherVector(Vector mask, Vector addresses, long index); // svld1_gather[_u64base]_index_f64 + /// public static unsafe Vector GatherVector(Vector mask, Vector addresses, long index); // svld1_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVector(Vector mask, Vector addresses, long index); // svld1_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector address, long indices); // svld1ub_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector address, long indices); // svld1ub_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector address, long indices); // svld1ub_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector address, long indices); // svld1ub_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses, long index); // svld1sh_gather[_u32base]_index_s32 + /// public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses, long index); // svld1sh_gather[_u32base]_index_u32 + /// public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses, long index); // svld1sh_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses, long index); // svld1sh_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, Vector addresses, long offset); // svld1sh_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, Vector addresses, long offset); // svld1sh_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, Vector addresses, long offset); // svld1sh_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, Vector addresses, long offset); // svld1sh_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses, long index); // svld1sw_gather[_u64base]_index_s64 + /// public static unsafe Vector 
GatherVectorInt32SignExtend(Vector mask, Vector addresses, long index); // svld1sw_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses, long index); // svld1sw_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses, long index); // svld1sw_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, Vector addresses, long offset); // svld1sw_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, Vector addresses, long offset); // svld1sw_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, Vector addresses, long offset); // svld1sw_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, Vector addresses, long offset); // svld1sw_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector address, long indices); // svld1sb_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector address, long indices); // svld1sb_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector address, long indices); // svld1sb_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector address, long indices); // svld1sb_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, Vector addresses, long offset); // svld1uh_gather[_u32base]_offset_s32 + /// public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, Vector addresses, long offset); // svld1uh_gather[_u32base]_offset_u32 + /// public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, Vector addresses, long offset); // svld1uh_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, Vector addresses, long offset); // svld1uh_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses, long index); // svld1uh_gather[_u32base]_index_s32 + /// public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses, long index); // svld1uh_gather[_u32base]_index_u32 + /// public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses, long index); // svld1uh_gather[_u64base]_index_s64 + /// public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses, long index); // svld1uh_gather[_u64base]_index_u64 + /// public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, Vector addresses, long offset); // svld1uw_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, Vector addresses, long offset); // svld1uw_gather[_u64base]_offset_s64 + /// public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, Vector addresses, long offset); // svld1uw_gather[_u64base]_offset_u64 + /// public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, Vector addresses, long offset); // svld1uw_gather[_u64base]_offset_u64 + /// public static unsafe Vector 
GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses, long index); // svld1uw_gather[_u64base]_index_s64
+ /// public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses, long index); // svld1uw_gather[_u64base]_index_s64
+ /// public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses, long index); // svld1uw_gather[_u64base]_index_u64
+ /// public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses, long index); // svld1uw_gather[_u64base]_index_u64
+ /// Total Rejected: 68
+
+ /// Total ACLE covered across API: 250
+
diff --git a/sve_api/out_api/apiraw_FEAT_SVE__loads.cs b/sve_api/out_api/apiraw_FEAT_SVE__loads.cs
new file mode 100644
index 0000000000000..7f5fabbdcb2f9
--- /dev/null
+++ b/sve_api/out_api/apiraw_FEAT_SVE__loads.cs
@@ -0,0 +1,905 @@
+namespace System.Runtime.Intrinsics.Arm;
+
+/// VectorT Summary
+public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: loads
+{
+
+ /// T: [uint, int], [ulong, long]
+ public static unsafe Vector Compute16BitAddresses(Vector bases, Vector indices); // ADR
+
+ /// T: uint, ulong
+ public static unsafe Vector Compute16BitAddresses(Vector bases, Vector indices); // ADR
+
+ /// T: [uint, int], [ulong, long]
+ public static unsafe Vector Compute32BitAddresses(Vector bases, Vector indices); // ADR
+
+ /// T: uint, ulong
+ public static unsafe Vector Compute32BitAddresses(Vector bases, Vector indices); // ADR
+
+ /// T: [uint, int], [ulong, long]
+ public static unsafe Vector Compute64BitAddresses(Vector bases, Vector indices); // ADR
+
+ /// T: uint, ulong
+ public static unsafe Vector Compute64BitAddresses(Vector bases, Vector indices); // ADR
+
+ /// T: [uint, int], [ulong, long]
+ public static unsafe Vector Compute8BitAddresses(Vector bases, Vector indices); // ADR
+
+ /// T: uint, ulong
+ public static unsafe Vector Compute8BitAddresses(Vector bases, Vector indices); // ADR
+
+ /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector LoadVector(Vector mask, T* address); // LD1W or LD1D or LD1B or LD1H
+
+ /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, T* address); // LD1RQW or LD1RQD or LD1RQB or LD1RQH
+
+ public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt16(byte* address); // LDNF1B // predicated
+
+ public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt32(byte* address); // LDNF1B // predicated
+
+ public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt64(byte* address); // LDNF1B // predicated
+
+ public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt16(byte* address); // LDNF1B // predicated
+
+ public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt32(byte* address); // LDNF1B // predicated
+
+ public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt64(byte* address); // LDNF1B // predicated
+
+ public static unsafe Vector LoadVectorByteZeroExtendToInt16(Vector mask, byte* address); // LD1B
+
+ public static unsafe Vector LoadVectorByteZeroExtendToInt32(Vector mask, byte* address); // LD1B
+
+ public static unsafe Vector LoadVectorByteZeroExtendToInt64(Vector mask, byte* address); // LD1B
+
+ public static unsafe Vector LoadVectorByteZeroExtendToUInt16(Vector mask, byte* address); // LD1B
+
+ public static unsafe Vector LoadVectorByteZeroExtendToUInt32(Vector mask, byte* address); // LD1B
+
+
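The predicated loads in this summary pair naturally with a while-less-than predicate for loop tails. A minimal sketch, assuming the Vector<int> instantiation of LoadVector and the Sve.CreateWhileLessThanMask32Bit helper from the rest of the proposal:

// Sums an int buffer whose length need not be a multiple of the runtime vector
// length; assumes using System.Numerics and using System.Runtime.Intrinsics.Arm.
static unsafe long SumInt32(int* src, int length)
{
    long total = 0;
    for (int i = 0; i < length; i += Vector<int>.Count)
    {
        // WHILELT: lanes with i + lane < length are active, the rest inactive.
        Vector<int> mask = Sve.CreateWhileLessThanMask32Bit(i, length);

        // LD1W: inactive lanes load as zero, so no scalar tail loop is needed.
        Vector<int> chunk = Sve.LoadVector(mask, src + i);
        total += Vector.Sum(chunk);
    }
    return total;
}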
public static unsafe Vector LoadVectorByteZeroExtendToUInt64(Vector mask, byte* address); // LD1B + + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToInt32(short* address); // LDNF1SH // predicated + + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToInt64(short* address); // LDNF1SH // predicated + + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToUInt32(short* address); // LDNF1SH // predicated + + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToUInt64(short* address); // LDNF1SH // predicated + + public static unsafe Vector LoadVectorInt16SignExtendToInt32(Vector mask, short* address); // LD1SH + + public static unsafe Vector LoadVectorInt16SignExtendToInt64(Vector mask, short* address); // LD1SH + + public static unsafe Vector LoadVectorInt16SignExtendToUInt32(Vector mask, short* address); // LD1SH + + public static unsafe Vector LoadVectorInt16SignExtendToUInt64(Vector mask, short* address); // LD1SH + + public static unsafe Vector LoadVectorInt32NonFaultingSignExtendToInt64(int* address); // LDNF1SW // predicated + + public static unsafe Vector LoadVectorInt32NonFaultingSignExtendToUInt64(int* address); // LDNF1SW // predicated + + public static unsafe Vector LoadVectorInt32SignExtendToInt64(Vector mask, int* address); // LD1SW + + public static unsafe Vector LoadVectorInt32SignExtendToUInt64(Vector mask, int* address); // LD1SW + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector LoadVectorNonFaulting(T* address); // LDNF1W or LDNF1D or LDNF1B or LDNF1H // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector LoadVectorNonTemporal(Vector mask, T* address); // LDNT1W or LDNT1D or LDNT1B or LDNT1H + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt16(sbyte* address); // LDNF1SB // predicated + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt32(sbyte* address); // LDNF1SB // predicated + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt64(sbyte* address); // LDNF1SB // predicated + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt16(sbyte* address); // LDNF1SB // predicated + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt32(sbyte* address); // LDNF1SB // predicated + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt64(sbyte* address); // LDNF1SB // predicated + + public static unsafe Vector LoadVectorSByteSignExtendToInt16(Vector mask, sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToInt32(Vector mask, sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToInt64(Vector mask, sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToUInt16(Vector mask, sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToUInt32(Vector mask, sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToUInt64(Vector mask, sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorUInt16NonFaultingZeroExtendToInt32(ushort* address); // LDNF1H // predicated + + public static unsafe Vector LoadVectorUInt16NonFaultingZeroExtendToInt64(ushort* address); // LDNF1H // predicated + + public static unsafe Vector LoadVectorUInt16NonFaultingZeroExtendToUInt32(ushort* address); // LDNF1H // predicated + + public static unsafe Vector 
LoadVectorUInt16NonFaultingZeroExtendToUInt64(ushort* address); // LDNF1H // predicated + + public static unsafe Vector LoadVectorUInt16ZeroExtendToInt32(Vector mask, ushort* address); // LD1H + + public static unsafe Vector LoadVectorUInt16ZeroExtendToInt64(Vector mask, ushort* address); // LD1H + + public static unsafe Vector LoadVectorUInt16ZeroExtendToUInt32(Vector mask, ushort* address); // LD1H + + public static unsafe Vector LoadVectorUInt16ZeroExtendToUInt64(Vector mask, ushort* address); // LD1H + + public static unsafe Vector LoadVectorUInt32NonFaultingZeroExtendToInt64(uint* address); // LDNF1W // predicated + + public static unsafe Vector LoadVectorUInt32NonFaultingZeroExtendToUInt64(uint* address); // LDNF1W // predicated + + public static unsafe Vector LoadVectorUInt32ZeroExtendToInt64(Vector mask, uint* address); // LD1W + + public static unsafe Vector LoadVectorUInt32ZeroExtendToUInt64(Vector mask, uint* address); // LD1W + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, T* address); // LD2W or LD2D or LD2B or LD2H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, T* address); // LD3W or LD3D or LD3B or LD3H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, T* address); // LD4W or LD4D or LD4B or LD4H + + public static unsafe void PrefetchBytes(Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType); // PRFB + + public static unsafe void PrefetchInt16(Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType); // PRFH + + public static unsafe void PrefetchInt32(Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType); // PRFW + + public static unsafe void PrefetchInt64(Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType); // PRFD + + +public enum SvePrefetchType +{ + LoadL1Temporal = 0, + LoadL1NonTemporal = 1, + LoadL2Temporal = 2, + LoadL2NonTemporal = 3, + LoadL3Temporal = 4, + LoadL3NonTemporal = 5, + StoreL1Temporal = 8, + StoreL1NonTemporal = 9, + StoreL2Temporal = 10, + StoreL2NonTemporal = 11, + StoreL3Temporal = 12, + StoreL3NonTemporal = 13 +}; + + /// total method signatures: 67 + +} + + +/// Full API +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: loads +{ + /// Compute16BitAddresses : Compute vector addresses for 16-bit data + + /// svuint32_t svadrh[_u32base]_[s32]index(svuint32_t bases, svint32_t indices) : "ADR Zresult.S, [Zbases.S, Zindices.S, LSL #1]" + public static unsafe Vector Compute16BitAddresses(Vector bases, Vector indices); + + /// svuint32_t svadrh[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices) : "ADR Zresult.S, [Zbases.S, Zindices.S, LSL #1]" + public static unsafe Vector Compute16BitAddresses(Vector bases, Vector indices); + + /// svuint64_t svadrh[_u64base]_[s64]index(svuint64_t bases, svint64_t indices) : "ADR Zresult.D, [Zbases.D, Zindices.D, LSL #1]" + public static unsafe Vector Compute16BitAddresses(Vector bases, Vector indices); + + /// svuint64_t svadrh[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices) : "ADR Zresult.D, [Zbases.D, Zindices.D, LSL #1]" + public static unsafe Vector Compute16BitAddresses(Vector bases, Vector indices); + + + /// Compute32BitAddresses : Compute vector addresses for 32-bit data + 
+ /// svuint32_t svadrw[_u32base]_[s32]index(svuint32_t bases, svint32_t indices) : "ADR Zresult.S, [Zbases.S, Zindices.S, LSL #2]" + public static unsafe Vector Compute32BitAddresses(Vector bases, Vector indices); + + /// svuint32_t svadrw[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices) : "ADR Zresult.S, [Zbases.S, Zindices.S, LSL #2]" + public static unsafe Vector Compute32BitAddresses(Vector bases, Vector indices); + + /// svuint64_t svadrw[_u64base]_[s64]index(svuint64_t bases, svint64_t indices) : "ADR Zresult.D, [Zbases.D, Zindices.D, LSL #2]" + public static unsafe Vector Compute32BitAddresses(Vector bases, Vector indices); + + /// svuint64_t svadrw[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices) : "ADR Zresult.D, [Zbases.D, Zindices.D, LSL #2]" + public static unsafe Vector Compute32BitAddresses(Vector bases, Vector indices); + + + /// Compute64BitAddresses : Compute vector addresses for 64-bit data + + /// svuint32_t svadrd[_u32base]_[s32]index(svuint32_t bases, svint32_t indices) : "ADR Zresult.S, [Zbases.S, Zindices.S, LSL #3]" + public static unsafe Vector Compute64BitAddresses(Vector bases, Vector indices); + + /// svuint32_t svadrd[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices) : "ADR Zresult.S, [Zbases.S, Zindices.S, LSL #3]" + public static unsafe Vector Compute64BitAddresses(Vector bases, Vector indices); + + /// svuint64_t svadrd[_u64base]_[s64]index(svuint64_t bases, svint64_t indices) : "ADR Zresult.D, [Zbases.D, Zindices.D, LSL #3]" + public static unsafe Vector Compute64BitAddresses(Vector bases, Vector indices); + + /// svuint64_t svadrd[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices) : "ADR Zresult.D, [Zbases.D, Zindices.D, LSL #3]" + public static unsafe Vector Compute64BitAddresses(Vector bases, Vector indices); + + + /// Compute8BitAddresses : Compute vector addresses for 8-bit data + + /// svuint32_t svadrb[_u32base]_[s32]offset(svuint32_t bases, svint32_t offsets) : "ADR Zresult.S, [Zbases.S, Zoffsets.S]" + public static unsafe Vector Compute8BitAddresses(Vector bases, Vector indices); + + /// svuint32_t svadrb[_u32base]_[u32]offset(svuint32_t bases, svuint32_t offsets) : "ADR Zresult.S, [Zbases.S, Zoffsets.S]" + public static unsafe Vector Compute8BitAddresses(Vector bases, Vector indices); + + /// svuint64_t svadrb[_u64base]_[s64]offset(svuint64_t bases, svint64_t offsets) : "ADR Zresult.D, [Zbases.D, Zoffsets.D]" + public static unsafe Vector Compute8BitAddresses(Vector bases, Vector indices); + + /// svuint64_t svadrb[_u64base]_[u64]offset(svuint64_t bases, svuint64_t offsets) : "ADR Zresult.D, [Zbases.D, Zoffsets.D]" + public static unsafe Vector Compute8BitAddresses(Vector bases, Vector indices); + + + /// LoadVector : Unextended load + + /// svfloat32_t svld1[_f32](svbool_t pg, const float32_t *base) : "LD1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVector(Vector mask, float* address); + + /// svfloat64_t svld1[_f64](svbool_t pg, const float64_t *base) : "LD1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVector(Vector mask, double* address); + + /// svint8_t svld1[_s8](svbool_t pg, const int8_t *base) : "LD1B Zresult.B, Pg/Z, [Xarray, Xindex]" or "LD1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVector(Vector mask, sbyte* address); + + /// svint16_t svld1[_s16](svbool_t pg, const int16_t *base) : "LD1H Zresult.H, Pg/Z, 
[Xarray, Xindex, LSL #1]" or "LD1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVector(Vector mask, short* address); + + /// svint32_t svld1[_s32](svbool_t pg, const int32_t *base) : "LD1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVector(Vector mask, int* address); + + /// svint64_t svld1[_s64](svbool_t pg, const int64_t *base) : "LD1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVector(Vector mask, long* address); + + /// svuint8_t svld1[_u8](svbool_t pg, const uint8_t *base) : "LD1B Zresult.B, Pg/Z, [Xarray, Xindex]" or "LD1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVector(Vector mask, byte* address); + + /// svuint16_t svld1[_u16](svbool_t pg, const uint16_t *base) : "LD1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVector(Vector mask, ushort* address); + + /// svuint32_t svld1[_u32](svbool_t pg, const uint32_t *base) : "LD1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVector(Vector mask, uint* address); + + /// svuint64_t svld1[_u64](svbool_t pg, const uint64_t *base) : "LD1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVector(Vector mask, ulong* address); + + + /// LoadVector128AndReplicateToVector : Load and replicate 128 bits of data + + /// svfloat32_t svld1rq[_f32](svbool_t pg, const float32_t *base) : "LD1RQW Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD1RQW Zresult.S, Pg/Z, [Xarray, #index * 4]" or "LD1RQW Zresult.S, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, float* address); + + /// svfloat64_t svld1rq[_f64](svbool_t pg, const float64_t *base) : "LD1RQD Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD1RQD Zresult.D, Pg/Z, [Xarray, #index * 8]" or "LD1RQD Zresult.D, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, double* address); + + /// svint8_t svld1rq[_s8](svbool_t pg, const int8_t *base) : "LD1RQB Zresult.B, Pg/Z, [Xarray, Xindex]" or "LD1RQB Zresult.B, Pg/Z, [Xarray, #index]" or "LD1RQB Zresult.B, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, sbyte* address); + + /// svint16_t svld1rq[_s16](svbool_t pg, const int16_t *base) : "LD1RQH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1RQH Zresult.H, Pg/Z, [Xarray, #index * 2]" or "LD1RQH Zresult.H, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, short* address); + + /// svint32_t svld1rq[_s32](svbool_t pg, const int32_t *base) : "LD1RQW Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD1RQW Zresult.S, Pg/Z, [Xarray, #index * 4]" or "LD1RQW Zresult.S, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, int* address); + + /// svint64_t svld1rq[_s64](svbool_t pg, const int64_t *base) : "LD1RQD Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD1RQD Zresult.D, Pg/Z, [Xarray, #index * 8]" or "LD1RQD Zresult.D, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, long* address); + + /// svuint8_t svld1rq[_u8](svbool_t pg, const uint8_t *base) : "LD1RQB Zresult.B, Pg/Z, [Xarray, Xindex]" or "LD1RQB 
Zresult.B, Pg/Z, [Xarray, #index]" or "LD1RQB Zresult.B, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, byte* address); + + /// svuint16_t svld1rq[_u16](svbool_t pg, const uint16_t *base) : "LD1RQH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1RQH Zresult.H, Pg/Z, [Xarray, #index * 2]" or "LD1RQH Zresult.H, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, ushort* address); + + /// svuint32_t svld1rq[_u32](svbool_t pg, const uint32_t *base) : "LD1RQW Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD1RQW Zresult.S, Pg/Z, [Xarray, #index * 4]" or "LD1RQW Zresult.S, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, uint* address); + + /// svuint64_t svld1rq[_u64](svbool_t pg, const uint64_t *base) : "LD1RQD Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD1RQD Zresult.D, Pg/Z, [Xarray, #index * 8]" or "LD1RQD Zresult.D, Pg/Z, [Xbase, #0]" + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, ulong* address); + + + /// LoadVectorByteNonFaultingZeroExtendToInt16 : Load 8-bit data and zero-extend, non-faulting + + /// svint16_t svldnf1ub_s16(svbool_t pg, const uint8_t *base) : "LDNF1B Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt16(byte* address); + + + /// LoadVectorByteNonFaultingZeroExtendToInt32 : Load 8-bit data and zero-extend, non-faulting + + /// svint32_t svldnf1ub_s32(svbool_t pg, const uint8_t *base) : "LDNF1B Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt32(byte* address); + + + /// LoadVectorByteNonFaultingZeroExtendToInt64 : Load 8-bit data and zero-extend, non-faulting + + /// svint64_t svldnf1ub_s64(svbool_t pg, const uint8_t *base) : "LDNF1B Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt64(byte* address); + + + /// LoadVectorByteNonFaultingZeroExtendToUInt16 : Load 8-bit data and zero-extend, non-faulting + + /// svuint16_t svldnf1ub_u16(svbool_t pg, const uint8_t *base) : "LDNF1B Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt16(byte* address); + + + /// LoadVectorByteNonFaultingZeroExtendToUInt32 : Load 8-bit data and zero-extend, non-faulting + + /// svuint32_t svldnf1ub_u32(svbool_t pg, const uint8_t *base) : "LDNF1B Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt32(byte* address); + + + /// LoadVectorByteNonFaultingZeroExtendToUInt64 : Load 8-bit data and zero-extend, non-faulting + + /// svuint64_t svldnf1ub_u64(svbool_t pg, const uint8_t *base) : "LDNF1B Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt64(byte* address); + + + /// LoadVectorByteZeroExtendToInt16 : Load 8-bit data and zero-extend + + /// svint16_t svld1ub_s16(svbool_t pg, const uint8_t *base) : "LD1B Zresult.H, Pg/Z, [Xarray, Xindex]" or "LD1B Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorByteZeroExtendToInt16(Vector mask, byte* address); + + + /// LoadVectorByteZeroExtendToInt32 : Load 8-bit data and zero-extend + + /// svint32_t svld1ub_s32(svbool_t pg, const uint8_t *base) : "LD1B Zresult.S, Pg/Z, [Xarray, Xindex]" or "LD1B Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorByteZeroExtendToInt32(Vector mask, byte* 
address); + + + /// LoadVectorByteZeroExtendToInt64 : Load 8-bit data and zero-extend + + /// svint64_t svld1ub_s64(svbool_t pg, const uint8_t *base) : "LD1B Zresult.D, Pg/Z, [Xarray, Xindex]" or "LD1B Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorByteZeroExtendToInt64(Vector mask, byte* address); + + + /// LoadVectorByteZeroExtendToUInt16 : Load 8-bit data and zero-extend + + /// svuint16_t svld1ub_u16(svbool_t pg, const uint8_t *base) : "LD1B Zresult.H, Pg/Z, [Xarray, Xindex]" or "LD1B Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorByteZeroExtendToUInt16(Vector mask, byte* address); + + + /// LoadVectorByteZeroExtendToUInt32 : Load 8-bit data and zero-extend + + /// svuint32_t svld1ub_u32(svbool_t pg, const uint8_t *base) : "LD1B Zresult.S, Pg/Z, [Xarray, Xindex]" or "LD1B Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorByteZeroExtendToUInt32(Vector mask, byte* address); + + + /// LoadVectorByteZeroExtendToUInt64 : Load 8-bit data and zero-extend + + /// svuint64_t svld1ub_u64(svbool_t pg, const uint8_t *base) : "LD1B Zresult.D, Pg/Z, [Xarray, Xindex]" or "LD1B Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorByteZeroExtendToUInt64(Vector mask, byte* address); + + + /// LoadVectorInt16NonFaultingSignExtendToInt32 : Load 16-bit data and sign-extend, non-faulting + + /// svint32_t svldnf1sh_s32(svbool_t pg, const int16_t *base) : "LDNF1SH Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToInt32(short* address); + + + /// LoadVectorInt16NonFaultingSignExtendToInt64 : Load 16-bit data and sign-extend, non-faulting + + /// svint64_t svldnf1sh_s64(svbool_t pg, const int16_t *base) : "LDNF1SH Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToInt64(short* address); + + + /// LoadVectorInt16NonFaultingSignExtendToUInt32 : Load 16-bit data and sign-extend, non-faulting + + /// svuint32_t svldnf1sh_u32(svbool_t pg, const int16_t *base) : "LDNF1SH Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToUInt32(short* address); + + + /// LoadVectorInt16NonFaultingSignExtendToUInt64 : Load 16-bit data and sign-extend, non-faulting + + /// svuint64_t svldnf1sh_u64(svbool_t pg, const int16_t *base) : "LDNF1SH Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToUInt64(short* address); + + + /// LoadVectorInt16SignExtendToInt32 : Load 16-bit data and sign-extend + + /// svint32_t svld1sh_s32(svbool_t pg, const int16_t *base) : "LD1SH Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1SH Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorInt16SignExtendToInt32(Vector mask, short* address); + + + /// LoadVectorInt16SignExtendToInt64 : Load 16-bit data and sign-extend + + /// svint64_t svld1sh_s64(svbool_t pg, const int16_t *base) : "LD1SH Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1SH Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorInt16SignExtendToInt64(Vector mask, short* address); + + + /// LoadVectorInt16SignExtendToUInt32 : Load 16-bit data and sign-extend + + /// svuint32_t svld1sh_u32(svbool_t pg, const int16_t *base) : "LD1SH Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1SH Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorInt16SignExtendToUInt32(Vector mask, short* address); + 
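// A hypothetical caller for the sign-extending loads above, exploiting the zeroing
// predication ("Pg/Z") shown in the LD1SH forms: inactive tail lanes load as zero, so
// partial vectors can be accumulated directly. A sketch only, assuming the generic
// Vector<T> signatures and CreateWhileLessThanMask32Bit from the mask category below.
static unsafe int SumInt16At32Bit(short* src, int count)
{
    if (!Sve.IsSupported)
    {
        int scalar = 0;
        for (int j = 0; j < count; j++) scalar += src[j];
        return scalar;
    }
    Vector<int> acc = Vector<int>.Zero;
    for (long i = 0; i < count; i += Vector<int>.Count)
    {
        Vector<int> mask = Sve.CreateWhileLessThanMask32Bit(i, (long)count);
        // Masked-off lanes come back as zero (Pg/Z), so they add nothing to the total.
        acc += Sve.LoadVectorInt16SignExtendToInt32(mask, src + i); // LD1SH, widening
    }
    int total = 0;
    for (int e = 0; e < Vector<int>.Count; e++) total += acc[e];
    return total;
}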
+ + /// LoadVectorInt16SignExtendToUInt64 : Load 16-bit data and sign-extend + + /// svuint64_t svld1sh_u64(svbool_t pg, const int16_t *base) : "LD1SH Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1SH Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorInt16SignExtendToUInt64(Vector mask, short* address); + + + /// LoadVectorInt32NonFaultingSignExtendToInt64 : Load 32-bit data and sign-extend, non-faulting + + /// svint64_t svldnf1sw_s64(svbool_t pg, const int32_t *base) : "LDNF1SW Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorInt32NonFaultingSignExtendToInt64(int* address); + + + /// LoadVectorInt32NonFaultingSignExtendToUInt64 : Load 32-bit data and sign-extend, non-faulting + + /// svuint64_t svldnf1sw_u64(svbool_t pg, const int32_t *base) : "LDNF1SW Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorInt32NonFaultingSignExtendToUInt64(int* address); + + + /// LoadVectorInt32SignExtendToInt64 : Load 32-bit data and sign-extend + + /// svint64_t svld1sw_s64(svbool_t pg, const int32_t *base) : "LD1SW Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD1SW Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorInt32SignExtendToInt64(Vector mask, int* address); + + + /// LoadVectorInt32SignExtendToUInt64 : Load 32-bit data and sign-extend + + /// svuint64_t svld1sw_u64(svbool_t pg, const int32_t *base) : "LD1SW Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD1SW Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorInt32SignExtendToUInt64(Vector mask, int* address); + + + /// LoadVectorNonFaulting : Unextended load, non-faulting + + /// svfloat32_t svldnf1[_f32](svbool_t pg, const float32_t *base) : "LDNF1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonFaulting(float* address); + + /// svfloat64_t svldnf1[_f64](svbool_t pg, const float64_t *base) : "LDNF1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonFaulting(double* address); + + /// svint8_t svldnf1[_s8](svbool_t pg, const int8_t *base) : "LDNF1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonFaulting(sbyte* address); + + /// svint16_t svldnf1[_s16](svbool_t pg, const int16_t *base) : "LDNF1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonFaulting(short* address); + + /// svint32_t svldnf1[_s32](svbool_t pg, const int32_t *base) : "LDNF1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonFaulting(int* address); + + /// svint64_t svldnf1[_s64](svbool_t pg, const int64_t *base) : "LDNF1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonFaulting(long* address); + + /// svuint8_t svldnf1[_u8](svbool_t pg, const uint8_t *base) : "LDNF1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonFaulting(byte* address); + + /// svuint16_t svldnf1[_u16](svbool_t pg, const uint16_t *base) : "LDNF1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonFaulting(ushort* address); + + /// svuint32_t svldnf1[_u32](svbool_t pg, const uint32_t *base) : "LDNF1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonFaulting(uint* address); + + /// svuint64_t svldnf1[_u64](svbool_t pg, const uint64_t *base) : "LDNF1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonFaulting(ulong* address); + + + /// LoadVectorNonTemporal 
: Unextended load, non-temporal + + /// svfloat32_t svldnt1[_f32](svbool_t pg, const float32_t *base) : "LDNT1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LDNT1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonTemporal(Vector mask, float* address); + + /// svfloat64_t svldnt1[_f64](svbool_t pg, const float64_t *base) : "LDNT1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LDNT1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonTemporal(Vector mask, double* address); + + /// svint8_t svldnt1[_s8](svbool_t pg, const int8_t *base) : "LDNT1B Zresult.B, Pg/Z, [Xarray, Xindex]" or "LDNT1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonTemporal(Vector mask, sbyte* address); + + /// svint16_t svldnt1[_s16](svbool_t pg, const int16_t *base) : "LDNT1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDNT1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonTemporal(Vector mask, short* address); + + /// svint32_t svldnt1[_s32](svbool_t pg, const int32_t *base) : "LDNT1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LDNT1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonTemporal(Vector mask, int* address); + + /// svint64_t svldnt1[_s64](svbool_t pg, const int64_t *base) : "LDNT1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LDNT1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonTemporal(Vector mask, long* address); + + /// svuint8_t svldnt1[_u8](svbool_t pg, const uint8_t *base) : "LDNT1B Zresult.B, Pg/Z, [Xarray, Xindex]" or "LDNT1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonTemporal(Vector mask, byte* address); + + /// svuint16_t svldnt1[_u16](svbool_t pg, const uint16_t *base) : "LDNT1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]" or "LDNT1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonTemporal(Vector mask, ushort* address); + + /// svuint32_t svldnt1[_u32](svbool_t pg, const uint32_t *base) : "LDNT1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]" or "LDNT1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonTemporal(Vector mask, uint* address); + + /// svuint64_t svldnt1[_u64](svbool_t pg, const uint64_t *base) : "LDNT1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]" or "LDNT1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorNonTemporal(Vector mask, ulong* address); + + + /// LoadVectorSByteNonFaultingSignExtendToInt16 : Load 8-bit data and sign-extend, non-faulting + + /// svint16_t svldnf1sb_s16(svbool_t pg, const int8_t *base) : "LDNF1SB Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt16(sbyte* address); + + + /// LoadVectorSByteNonFaultingSignExtendToInt32 : Load 8-bit data and sign-extend, non-faulting + + /// svint32_t svldnf1sb_s32(svbool_t pg, const int8_t *base) : "LDNF1SB Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt32(sbyte* address); + + + /// LoadVectorSByteNonFaultingSignExtendToInt64 : Load 8-bit data and sign-extend, non-faulting + + /// svint64_t svldnf1sb_s64(svbool_t pg, const int8_t *base) : "LDNF1SB Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt64(sbyte* address); + + + /// LoadVectorSByteNonFaultingSignExtendToUInt16 : Load 8-bit data and sign-extend, non-faulting + + /// 
svuint16_t svldnf1sb_u16(svbool_t pg, const int8_t *base) : "LDNF1SB Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt16(sbyte* address); + + + /// LoadVectorSByteNonFaultingSignExtendToUInt32 : Load 8-bit data and sign-extend, non-faulting + + /// svuint32_t svldnf1sb_u32(svbool_t pg, const int8_t *base) : "LDNF1SB Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt32(sbyte* address); + + + /// LoadVectorSByteNonFaultingSignExtendToUInt64 : Load 8-bit data and sign-extend, non-faulting + + /// svuint64_t svldnf1sb_u64(svbool_t pg, const int8_t *base) : "LDNF1SB Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt64(sbyte* address); + + + /// LoadVectorSByteSignExtendToInt16 : Load 8-bit data and sign-extend + + /// svint16_t svld1sb_s16(svbool_t pg, const int8_t *base) : "LD1SB Zresult.H, Pg/Z, [Xarray, Xindex]" or "LD1SB Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorSByteSignExtendToInt16(Vector mask, sbyte* address); + + + /// LoadVectorSByteSignExtendToInt32 : Load 8-bit data and sign-extend + + /// svint32_t svld1sb_s32(svbool_t pg, const int8_t *base) : "LD1SB Zresult.S, Pg/Z, [Xarray, Xindex]" or "LD1SB Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorSByteSignExtendToInt32(Vector mask, sbyte* address); + + + /// LoadVectorSByteSignExtendToInt64 : Load 8-bit data and sign-extend + + /// svint64_t svld1sb_s64(svbool_t pg, const int8_t *base) : "LD1SB Zresult.D, Pg/Z, [Xarray, Xindex]" or "LD1SB Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorSByteSignExtendToInt64(Vector mask, sbyte* address); + + + /// LoadVectorSByteSignExtendToUInt16 : Load 8-bit data and sign-extend + + /// svuint16_t svld1sb_u16(svbool_t pg, const int8_t *base) : "LD1SB Zresult.H, Pg/Z, [Xarray, Xindex]" or "LD1SB Zresult.H, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorSByteSignExtendToUInt16(Vector mask, sbyte* address); + + + /// LoadVectorSByteSignExtendToUInt32 : Load 8-bit data and sign-extend + + /// svuint32_t svld1sb_u32(svbool_t pg, const int8_t *base) : "LD1SB Zresult.S, Pg/Z, [Xarray, Xindex]" or "LD1SB Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorSByteSignExtendToUInt32(Vector mask, sbyte* address); + + + /// LoadVectorSByteSignExtendToUInt64 : Load 8-bit data and sign-extend + + /// svuint64_t svld1sb_u64(svbool_t pg, const int8_t *base) : "LD1SB Zresult.D, Pg/Z, [Xarray, Xindex]" or "LD1SB Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorSByteSignExtendToUInt64(Vector mask, sbyte* address); + + + /// LoadVectorUInt16NonFaultingZeroExtendToInt32 : Load 16-bit data and zero-extend, non-faulting + + /// svint32_t svldnf1uh_s32(svbool_t pg, const uint16_t *base) : "LDNF1H Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorUInt16NonFaultingZeroExtendToInt32(ushort* address); + + + /// LoadVectorUInt16NonFaultingZeroExtendToInt64 : Load 16-bit data and zero-extend, non-faulting + + /// svint64_t svldnf1uh_s64(svbool_t pg, const uint16_t *base) : "LDNF1H Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorUInt16NonFaultingZeroExtendToInt64(ushort* address); + + + /// LoadVectorUInt16NonFaultingZeroExtendToUInt32 : Load 16-bit data and zero-extend, non-faulting + + /// svuint32_t svldnf1uh_u32(svbool_t pg, 
const uint16_t *base) : "LDNF1H Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorUInt16NonFaultingZeroExtendToUInt32(ushort* address); + + + /// LoadVectorUInt16NonFaultingZeroExtendToUInt64 : Load 16-bit data and zero-extend, non-faulting + + /// svuint64_t svldnf1uh_u64(svbool_t pg, const uint16_t *base) : "LDNF1H Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorUInt16NonFaultingZeroExtendToUInt64(ushort* address); + + + /// LoadVectorUInt16ZeroExtendToInt32 : Load 16-bit data and zero-extend + + /// svint32_t svld1uh_s32(svbool_t pg, const uint16_t *base) : "LD1H Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1H Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorUInt16ZeroExtendToInt32(Vector mask, ushort* address); + + + /// LoadVectorUInt16ZeroExtendToInt64 : Load 16-bit data and zero-extend + + /// svint64_t svld1uh_s64(svbool_t pg, const uint16_t *base) : "LD1H Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1H Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorUInt16ZeroExtendToInt64(Vector mask, ushort* address); + + + /// LoadVectorUInt16ZeroExtendToUInt32 : Load 16-bit data and zero-extend + + /// svuint32_t svld1uh_u32(svbool_t pg, const uint16_t *base) : "LD1H Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1H Zresult.S, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorUInt16ZeroExtendToUInt32(Vector mask, ushort* address); + + + /// LoadVectorUInt16ZeroExtendToUInt64 : Load 16-bit data and zero-extend + + /// svuint64_t svld1uh_u64(svbool_t pg, const uint16_t *base) : "LD1H Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD1H Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorUInt16ZeroExtendToUInt64(Vector mask, ushort* address); + + + /// LoadVectorUInt32NonFaultingZeroExtendToInt64 : Load 32-bit data and zero-extend, non-faulting + + /// svint64_t svldnf1uw_s64(svbool_t pg, const uint32_t *base) : "LDNF1W Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorUInt32NonFaultingZeroExtendToInt64(uint* address); + + + /// LoadVectorUInt32NonFaultingZeroExtendToUInt64 : Load 32-bit data and zero-extend, non-faulting + + /// svuint64_t svldnf1uw_u64(svbool_t pg, const uint32_t *base) : "LDNF1W Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorUInt32NonFaultingZeroExtendToUInt64(uint* address); + + + /// LoadVectorUInt32ZeroExtendToInt64 : Load 32-bit data and zero-extend + + /// svint64_t svld1uw_s64(svbool_t pg, const uint32_t *base) : "LD1W Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD1W Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorUInt32ZeroExtendToInt64(Vector mask, uint* address); + + + /// LoadVectorUInt32ZeroExtendToUInt64 : Load 32-bit data and zero-extend + + /// svuint64_t svld1uw_u64(svbool_t pg, const uint32_t *base) : "LD1W Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD1W Zresult.D, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe Vector LoadVectorUInt32ZeroExtendToUInt64(Vector mask, uint* address); + + + /// LoadVectorx2 : Load two-element tuples into two vectors + + /// svfloat32x2_t svld2[_f32](svbool_t pg, const float32_t *base) : "LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, float* address); + + /// svfloat64x2_t svld2[_f64](svbool_t pg, const float64_t *base) : "LD2D 
{Zresult0.D, Zresult1.D}, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD2D {Zresult0.D, Zresult1.D}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, double* address); + + /// svint8x2_t svld2[_s8](svbool_t pg, const int8_t *base) : "LD2B {Zresult0.B, Zresult1.B}, Pg/Z, [Xarray, Xindex]" or "LD2B {Zresult0.B, Zresult1.B}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, sbyte* address); + + /// svint16x2_t svld2[_s16](svbool_t pg, const int16_t *base) : "LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, short* address); + + /// svint32x2_t svld2[_s32](svbool_t pg, const int32_t *base) : "LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, int* address); + + /// svint64x2_t svld2[_s64](svbool_t pg, const int64_t *base) : "LD2D {Zresult0.D, Zresult1.D}, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD2D {Zresult0.D, Zresult1.D}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, long* address); + + /// svuint8x2_t svld2[_u8](svbool_t pg, const uint8_t *base) : "LD2B {Zresult0.B, Zresult1.B}, Pg/Z, [Xarray, Xindex]" or "LD2B {Zresult0.B, Zresult1.B}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, byte* address); + + /// svuint16x2_t svld2[_u16](svbool_t pg, const uint16_t *base) : "LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, ushort* address); + + /// svuint32x2_t svld2[_u32](svbool_t pg, const uint32_t *base) : "LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, uint* address); + + /// svuint64x2_t svld2[_u64](svbool_t pg, const uint64_t *base) : "LD2D {Zresult0.D, Zresult1.D}, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD2D {Zresult0.D, Zresult1.D}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, ulong* address); + + + /// LoadVectorx3 : Load three-element tuples into three vectors + + /// svfloat32x3_t svld3[_f32](svbool_t pg, const float32_t *base) : "LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, float* address); + + /// svfloat64x3_t svld3[_f64](svbool_t pg, const float64_t *base) : "LD3D {Zresult0.D - Zresult2.D}, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD3D {Zresult0.D - Zresult2.D}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, double* address); + + /// svint8x3_t svld3[_s8](svbool_t pg, const int8_t *base) : "LD3B {Zresult0.B - Zresult2.B}, Pg/Z, [Xarray, Xindex]" or "LD3B {Zresult0.B - Zresult2.B}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, sbyte* address); + + /// svint16x3_t svld3[_s16](svbool_t pg, const int16_t *base) : "LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe 
(Vector, Vector, Vector) LoadVectorx3(Vector mask, short* address); + + /// svint32x3_t svld3[_s32](svbool_t pg, const int32_t *base) : "LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, int* address); + + /// svint64x3_t svld3[_s64](svbool_t pg, const int64_t *base) : "LD3D {Zresult0.D - Zresult2.D}, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD3D {Zresult0.D - Zresult2.D}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, long* address); + + /// svuint8x3_t svld3[_u8](svbool_t pg, const uint8_t *base) : "LD3B {Zresult0.B - Zresult2.B}, Pg/Z, [Xarray, Xindex]" or "LD3B {Zresult0.B - Zresult2.B}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, byte* address); + + /// svuint16x3_t svld3[_u16](svbool_t pg, const uint16_t *base) : "LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, ushort* address); + + /// svuint32x3_t svld3[_u32](svbool_t pg, const uint32_t *base) : "LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, uint* address); + + /// svuint64x3_t svld3[_u64](svbool_t pg, const uint64_t *base) : "LD3D {Zresult0.D - Zresult2.D}, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD3D {Zresult0.D - Zresult2.D}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, ulong* address); + + + /// LoadVectorx4 : Load four-element tuples into four vectors + + /// svfloat32x4_t svld4[_f32](svbool_t pg, const float32_t *base) : "LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, float* address); + + /// svfloat64x4_t svld4[_f64](svbool_t pg, const float64_t *base) : "LD4D {Zresult0.D - Zresult3.D}, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD4D {Zresult0.D - Zresult3.D}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, double* address); + + /// svint8x4_t svld4[_s8](svbool_t pg, const int8_t *base) : "LD4B {Zresult0.B - Zresult3.B}, Pg/Z, [Xarray, Xindex]" or "LD4B {Zresult0.B - Zresult3.B}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, sbyte* address); + + /// svint16x4_t svld4[_s16](svbool_t pg, const int16_t *base) : "LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, short* address); + + /// svint32x4_t svld4[_s32](svbool_t pg, const int32_t *base) : "LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, int* address); + + /// svint64x4_t svld4[_s64](svbool_t pg, const int64_t *base) : "LD4D {Zresult0.D - Zresult3.D}, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD4D {Zresult0.D - Zresult3.D}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, 
Vector, Vector, Vector) LoadVectorx4(Vector mask, long* address); + + /// svuint8x4_t svld4[_u8](svbool_t pg, const uint8_t *base) : "LD4B {Zresult0.B - Zresult3.B}, Pg/Z, [Xarray, Xindex]" or "LD4B {Zresult0.B - Zresult3.B}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, byte* address); + + /// svuint16x4_t svld4[_u16](svbool_t pg, const uint16_t *base) : "LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xarray, Xindex, LSL #1]" or "LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, ushort* address); + + /// svuint32x4_t svld4[_u32](svbool_t pg, const uint32_t *base) : "LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xarray, Xindex, LSL #2]" or "LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, uint* address); + + /// svuint64x4_t svld4[_u64](svbool_t pg, const uint64_t *base) : "LD4D {Zresult0.D - Zresult3.D}, Pg/Z, [Xarray, Xindex, LSL #3]" or "LD4D {Zresult0.D - Zresult3.D}, Pg/Z, [Xbase, #0, MUL VL]" + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, ulong* address); + + + /// PrefetchBytes : Prefetch bytes + + /// void svprfb(svbool_t pg, const void *base, enum svprfop op) : "PRFB op, Pg, [Xarray, Xindex]" or "PRFB op, Pg, [Xbase, #0, MUL VL]" + public static unsafe void PrefetchBytes(Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType); + + + /// PrefetchInt16 : Prefetch halfwords + + /// void svprfh(svbool_t pg, const void *base, enum svprfop op) : "PRFH op, Pg, [Xarray, Xindex, LSL #1]" or "PRFH op, Pg, [Xbase, #0, MUL VL]" + public static unsafe void PrefetchInt16(Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType); + + + /// PrefetchInt32 : Prefetch words + + /// void svprfw(svbool_t pg, const void *base, enum svprfop op) : "PRFW op, Pg, [Xarray, Xindex, LSL #2]" or "PRFW op, Pg, [Xbase, #0, MUL VL]" + public static unsafe void PrefetchInt32(Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType); + + + /// PrefetchInt64 : Prefetch doublewords + + /// void svprfd(svbool_t pg, const void *base, enum svprfop op) : "PRFD op, Pg, [Xarray, Xindex, LSL #3]" or "PRFD op, Pg, [Xbase, #0, MUL VL]" + public static unsafe void PrefetchInt64(Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType); + + + /// total method signatures: 138 + /// total method names: 63 +} + + + /// Rejected: + /// public static unsafe Vector LoadVector(Vector mask, float* address, long vnum); // svld1_vnum[_f32] + /// public static unsafe Vector LoadVector(Vector mask, double* address, long vnum); // svld1_vnum[_f64] + /// public static unsafe Vector LoadVector(Vector mask, sbyte* address, long vnum); // svld1_vnum[_s8] + /// public static unsafe Vector LoadVector(Vector mask, short* address, long vnum); // svld1_vnum[_s16] + /// public static unsafe Vector LoadVector(Vector mask, int* address, long vnum); // svld1_vnum[_s32] + /// public static unsafe Vector LoadVector(Vector mask, long* address, long vnum); // svld1_vnum[_s64] + /// public static unsafe Vector LoadVector(Vector mask, byte* address, long vnum); // svld1_vnum[_u8] + /// public static unsafe Vector LoadVector(Vector mask, ushort* address, long vnum); // svld1_vnum[_u16] + /// public static unsafe Vector LoadVector(Vector mask, uint* address, long vnum); // svld1_vnum[_u32] + /// public static unsafe Vector 
LoadVector(Vector mask, ulong* address, long vnum); // svld1_vnum[_u64] + /// public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt16(byte* address, long vnum); // svldnf1ub_vnum_s16 + /// public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt32(byte* address, long vnum); // svldnf1ub_vnum_s32 + /// public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt64(byte* address, long vnum); // svldnf1ub_vnum_s64 + /// public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt16(byte* address, long vnum); // svldnf1ub_vnum_u16 + /// public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt32(byte* address, long vnum); // svldnf1ub_vnum_u32 + /// public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt64(byte* address, long vnum); // svldnf1ub_vnum_u64 + /// public static unsafe Vector LoadVectorByteZeroExtendToInt16(Vector mask, byte* address, long vnum); // svld1ub_vnum_s16 + /// public static unsafe Vector LoadVectorByteZeroExtendToInt32(Vector mask, byte* address, long vnum); // svld1ub_vnum_s32 + /// public static unsafe Vector LoadVectorByteZeroExtendToInt64(Vector mask, byte* address, long vnum); // svld1ub_vnum_s64 + /// public static unsafe Vector LoadVectorByteZeroExtendToUInt16(Vector mask, byte* address, long vnum); // svld1ub_vnum_u16 + /// public static unsafe Vector LoadVectorByteZeroExtendToUInt32(Vector mask, byte* address, long vnum); // svld1ub_vnum_u32 + /// public static unsafe Vector LoadVectorByteZeroExtendToUInt64(Vector mask, byte* address, long vnum); // svld1ub_vnum_u64 + /// public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToInt32(short* address, long vnum); // svldnf1sh_vnum_s32 + /// public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToInt64(short* address, long vnum); // svldnf1sh_vnum_s64 + /// public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToUInt32(short* address, long vnum); // svldnf1sh_vnum_u32 + /// public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToUInt64(short* address, long vnum); // svldnf1sh_vnum_u64 + /// public static unsafe Vector LoadVectorInt16SignExtendToInt32(Vector mask, short* address, long vnum); // svld1sh_vnum_s32 + /// public static unsafe Vector LoadVectorInt16SignExtendToInt64(Vector mask, short* address, long vnum); // svld1sh_vnum_s64 + /// public static unsafe Vector LoadVectorInt16SignExtendToUInt32(Vector mask, short* address, long vnum); // svld1sh_vnum_u32 + /// public static unsafe Vector LoadVectorInt16SignExtendToUInt64(Vector mask, short* address, long vnum); // svld1sh_vnum_u64 + /// public static unsafe Vector LoadVectorInt32NonFaultingSignExtendToInt64(int* address, long vnum); // svldnf1sw_vnum_s64 + /// public static unsafe Vector LoadVectorInt32NonFaultingSignExtendToUInt64(int* address, long vnum); // svldnf1sw_vnum_u64 + /// public static unsafe Vector LoadVectorInt32SignExtendToInt64(Vector mask, int* address, long vnum); // svld1sw_vnum_s64 + /// public static unsafe Vector LoadVectorInt32SignExtendToUInt64(Vector mask, int* address, long vnum); // svld1sw_vnum_u64 + /// public static unsafe Vector LoadVectorNonFaulting(float* address, long vnum); // svldnf1_vnum[_f32] + /// public static unsafe Vector LoadVectorNonFaulting(double* address, long vnum); // svldnf1_vnum[_f64] + /// public static unsafe Vector LoadVectorNonFaulting(sbyte* address, long vnum); // svldnf1_vnum[_s8] + /// public static unsafe Vector LoadVectorNonFaulting(short* address, long vnum); // svldnf1_vnum[_s16] + /// 
public static unsafe Vector LoadVectorNonFaulting(int* address, long vnum); // svldnf1_vnum[_s32] + /// public static unsafe Vector LoadVectorNonFaulting(long* address, long vnum); // svldnf1_vnum[_s64] + /// public static unsafe Vector LoadVectorNonFaulting(byte* address, long vnum); // svldnf1_vnum[_u8] + /// public static unsafe Vector LoadVectorNonFaulting(ushort* address, long vnum); // svldnf1_vnum[_u16] + /// public static unsafe Vector LoadVectorNonFaulting(uint* address, long vnum); // svldnf1_vnum[_u32] + /// public static unsafe Vector LoadVectorNonFaulting(ulong* address, long vnum); // svldnf1_vnum[_u64] + /// public static unsafe Vector LoadVectorNonTemporal(Vector mask, float* address, long vnum); // svldnt1_vnum[_f32] + /// public static unsafe Vector LoadVectorNonTemporal(Vector mask, double* address, long vnum); // svldnt1_vnum[_f64] + /// public static unsafe Vector LoadVectorNonTemporal(Vector mask, sbyte* address, long vnum); // svldnt1_vnum[_s8] + /// public static unsafe Vector LoadVectorNonTemporal(Vector mask, short* address, long vnum); // svldnt1_vnum[_s16] + /// public static unsafe Vector LoadVectorNonTemporal(Vector mask, int* address, long vnum); // svldnt1_vnum[_s32] + /// public static unsafe Vector LoadVectorNonTemporal(Vector mask, long* address, long vnum); // svldnt1_vnum[_s64] + /// public static unsafe Vector LoadVectorNonTemporal(Vector mask, byte* address, long vnum); // svldnt1_vnum[_u8] + /// public static unsafe Vector LoadVectorNonTemporal(Vector mask, ushort* address, long vnum); // svldnt1_vnum[_u16] + /// public static unsafe Vector LoadVectorNonTemporal(Vector mask, uint* address, long vnum); // svldnt1_vnum[_u32] + /// public static unsafe Vector LoadVectorNonTemporal(Vector mask, ulong* address, long vnum); // svldnt1_vnum[_u64] + /// public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt16(sbyte* address, long vnum); // svldnf1sb_vnum_s16 + /// public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt32(sbyte* address, long vnum); // svldnf1sb_vnum_s32 + /// public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt64(sbyte* address, long vnum); // svldnf1sb_vnum_s64 + /// public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt16(sbyte* address, long vnum); // svldnf1sb_vnum_u16 + /// public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt32(sbyte* address, long vnum); // svldnf1sb_vnum_u32 + /// public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt64(sbyte* address, long vnum); // svldnf1sb_vnum_u64 + /// public static unsafe Vector LoadVectorSByteSignExtendToInt16(Vector mask, sbyte* address, long vnum); // svld1sb_vnum_s16 + /// public static unsafe Vector LoadVectorSByteSignExtendToInt32(Vector mask, sbyte* address, long vnum); // svld1sb_vnum_s32 + /// public static unsafe Vector LoadVectorSByteSignExtendToInt64(Vector mask, sbyte* address, long vnum); // svld1sb_vnum_s64 + /// public static unsafe Vector LoadVectorSByteSignExtendToUInt16(Vector mask, sbyte* address, long vnum); // svld1sb_vnum_u16 + /// public static unsafe Vector LoadVectorSByteSignExtendToUInt32(Vector mask, sbyte* address, long vnum); // svld1sb_vnum_u32 + /// public static unsafe Vector LoadVectorSByteSignExtendToUInt64(Vector mask, sbyte* address, long vnum); // svld1sb_vnum_u64 + /// public static unsafe Vector LoadVectorUInt16NonFaultingZeroExtendToInt32(ushort* address, long vnum); // svldnf1uh_vnum_s32 + /// public static unsafe Vector 
LoadVectorUInt16NonFaultingZeroExtendToInt64(ushort* address, long vnum); // svldnf1uh_vnum_s64 + /// public static unsafe Vector LoadVectorUInt16NonFaultingZeroExtendToUInt32(ushort* address, long vnum); // svldnf1uh_vnum_u32 + /// public static unsafe Vector LoadVectorUInt16NonFaultingZeroExtendToUInt64(ushort* address, long vnum); // svldnf1uh_vnum_u64 + /// public static unsafe Vector LoadVectorUInt16ZeroExtendToInt32(Vector mask, ushort* address, long vnum); // svld1uh_vnum_s32 + /// public static unsafe Vector LoadVectorUInt16ZeroExtendToInt64(Vector mask, ushort* address, long vnum); // svld1uh_vnum_s64 + /// public static unsafe Vector LoadVectorUInt16ZeroExtendToUInt32(Vector mask, ushort* address, long vnum); // svld1uh_vnum_u32 + /// public static unsafe Vector LoadVectorUInt16ZeroExtendToUInt64(Vector mask, ushort* address, long vnum); // svld1uh_vnum_u64 + /// public static unsafe Vector LoadVectorUInt32NonFaultingZeroExtendToInt64(uint* address, long vnum); // svldnf1uw_vnum_s64 + /// public static unsafe Vector LoadVectorUInt32NonFaultingZeroExtendToUInt64(uint* address, long vnum); // svldnf1uw_vnum_u64 + /// public static unsafe Vector LoadVectorUInt32ZeroExtendToInt64(Vector mask, uint* address, long vnum); // svld1uw_vnum_s64 + /// public static unsafe Vector LoadVectorUInt32ZeroExtendToUInt64(Vector mask, uint* address, long vnum); // svld1uw_vnum_u64 + /// public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, float* address, long vnum); // svld2_vnum[_f32] + /// public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, double* address, long vnum); // svld2_vnum[_f64] + /// public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, sbyte* address, long vnum); // svld2_vnum[_s8] + /// public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, short* address, long vnum); // svld2_vnum[_s16] + /// public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, int* address, long vnum); // svld2_vnum[_s32] + /// public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, long* address, long vnum); // svld2_vnum[_s64] + /// public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, byte* address, long vnum); // svld2_vnum[_u8] + /// public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, ushort* address, long vnum); // svld2_vnum[_u16] + /// public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, uint* address, long vnum); // svld2_vnum[_u32] + /// public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, ulong* address, long vnum); // svld2_vnum[_u64] + /// public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, float* address, long vnum); // svld3_vnum[_f32] + /// public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, double* address, long vnum); // svld3_vnum[_f64] + /// public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, sbyte* address, long vnum); // svld3_vnum[_s8] + /// public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, short* address, long vnum); // svld3_vnum[_s16] + /// public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, int* address, long vnum); // svld3_vnum[_s32] + /// public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, long* address, long vnum); // svld3_vnum[_s64] + /// public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, byte* address, long vnum); // svld3_vnum[_u8] + /// public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, ushort* address, long vnum); 
// svld3_vnum[_u16] + /// public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, uint* address, long vnum); // svld3_vnum[_u32] + /// public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, ulong* address, long vnum); // svld3_vnum[_u64] + /// public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, float* address, long vnum); // svld4_vnum[_f32] + /// public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, double* address, long vnum); // svld4_vnum[_f64] + /// public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, sbyte* address, long vnum); // svld4_vnum[_s8] + /// public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, short* address, long vnum); // svld4_vnum[_s16] + /// public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, int* address, long vnum); // svld4_vnum[_s32] + /// public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, long* address, long vnum); // svld4_vnum[_s64] + /// public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, byte* address, long vnum); // svld4_vnum[_u8] + /// public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, ushort* address, long vnum); // svld4_vnum[_u16] + /// public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, uint* address, long vnum); // svld4_vnum[_u32] + /// public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, ulong* address, long vnum); // svld4_vnum[_u64] + /// public static unsafe void PrefetchBytes(Vector mask, void* address, long vnum, [ConstantExpected] SvePrefetchType prefetchType); // svprfb_vnum + /// public static unsafe void PrefetchInt16(Vector mask, void* address, long vnum, [ConstantExpected] SvePrefetchType prefetchType); // svprfh_vnum + /// public static unsafe void PrefetchInt32(Vector mask, void* address, long vnum, [ConstantExpected] SvePrefetchType prefetchType); // svprfw_vnum + /// public static unsafe void PrefetchInt64(Vector mask, void* address, long vnum, [ConstantExpected] SvePrefetchType prefetchType); // svprfd_vnum + /// Total Rejected: 112 + + /// Total ACLE covered across API: 250 + diff --git a/sve_api/out_api/apiraw_FEAT_SVE__mask.cs b/sve_api/out_api/apiraw_FEAT_SVE__mask.cs new file mode 100644 index 0000000000000..03ee58bf8c2fa --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE__mask.cs @@ -0,0 +1,1789 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: mask +{ + + /// T: float, double + public static unsafe Vector AbsoluteCompareGreaterThan(Vector left, Vector right); // FACGT // predicated + + /// T: float, double + public static unsafe Vector AbsoluteCompareGreaterThanOrEqual(Vector left, Vector right); // FACGE // predicated + + /// T: float, double + public static unsafe Vector AbsoluteCompareLessThan(Vector left, Vector right); // FACGT // predicated + + /// T: float, double + public static unsafe Vector AbsoluteCompareLessThanOrEqual(Vector left, Vector right); // FACGE // predicated + + /// T: float, double, int, long, uint, ulong + public static unsafe Vector Compact(Vector mask, Vector value); // COMPACT + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareEqual(Vector left, Vector right); // FCMEQ or CMPEQ // predicated + + /// T: [sbyte, long], [short, long], [int, 
long] + public static unsafe Vector CompareEqual(Vector left, Vector right); // CMPEQ // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareGreaterThan(Vector left, Vector right); // FCMGT or CMPGT or CMPHI // predicated + + /// T: [sbyte, long], [short, long], [int, long], [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector CompareGreaterThan(Vector left, Vector right); // CMPGT or CMPHI // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right); // FCMGE or CMPGE or CMPHS // predicated + + /// T: [sbyte, long], [short, long], [int, long], [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right); // CMPGE or CMPHS // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareLessThan(Vector left, Vector right); // FCMGT or CMPGT or CMPHI // predicated + + /// T: [sbyte, long], [short, long], [int, long], [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector CompareLessThan(Vector left, Vector right); // CMPLT or CMPLO // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right); // FCMGE or CMPGE or CMPHS // predicated + + /// T: [sbyte, long], [short, long], [int, long], [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right); // CMPLE or CMPLS // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right); // FCMNE or CMPNE // predicated + + /// T: [sbyte, long], [short, long], [int, long] + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right); // CMPNE // predicated + + /// T: float, double + public static unsafe Vector CompareUnordered(Vector left, Vector right); // FCMUO // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data); // CLASTA // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe T ConditionalExtractAfterLastActiveElement(Vector mask, T defaultValues, Vector data); // CLASTA // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, Vector defaultScalar, Vector data); // CLASTA // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ConditionalExtractLastActiveElement(Vector mask, Vector defaultValue, Vector data); // CLASTB // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe T ConditionalExtractLastActiveElement(Vector mask, T defaultValues, Vector data); // CLASTB // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ConditionalExtractLastActiveElementAndReplicate(Vector mask, Vector fallback, Vector data); // CLASTB // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, 
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> ConditionalSelect(Vector<T> mask, Vector<T> left, Vector<T> right); // SEL
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> CreateBreakAfterMask(Vector<T> totalMask, Vector<T> fromMask); // BRKA // predicated
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> CreateBreakAfterPropagateMask(Vector<T> mask, Vector<T> left, Vector<T> right); // BRKPA
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> CreateBreakBeforeMask(Vector<T> totalMask, Vector<T> fromMask); // BRKB // predicated
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> CreateBreakBeforePropagateMask(Vector<T> mask, Vector<T> left, Vector<T> right); // BRKPB
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> CreateBreakPropagateMask(Vector<T> totalMask, Vector<T> fromMask); // BRKN // predicated
+
+  public static unsafe Vector<byte> CreateFalseMaskByte(); // PFALSE
+
+  public static unsafe Vector<double> CreateFalseMaskDouble(); // PFALSE
+
+  public static unsafe Vector<short> CreateFalseMaskInt16(); // PFALSE
+
+  public static unsafe Vector<int> CreateFalseMaskInt32(); // PFALSE
+
+  public static unsafe Vector<long> CreateFalseMaskInt64(); // PFALSE
+
+  public static unsafe Vector<sbyte> CreateFalseMaskSByte(); // PFALSE
+
+  public static unsafe Vector<float> CreateFalseMaskSingle(); // PFALSE
+
+  public static unsafe Vector<ushort> CreateFalseMaskUInt16(); // PFALSE
+
+  public static unsafe Vector<uint> CreateFalseMaskUInt32(); // PFALSE
+
+  public static unsafe Vector<ulong> CreateFalseMaskUInt64(); // PFALSE
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> CreateMaskForFirstActiveElement(Vector<T> totalMask, Vector<T> fromMask); // PFIRST
+
+  /// T: byte, ushort, uint, ulong
+  public static unsafe Vector<T> CreateMaskForNextActiveElement(Vector<T> totalMask, Vector<T> fromMask); // PNEXT
+
+  public static unsafe Vector<byte> CreateTrueMaskByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE
+
+  public static unsafe Vector<double> CreateTrueMaskDouble([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE
+
+  public static unsafe Vector<short> CreateTrueMaskInt16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE
+
+  public static unsafe Vector<int> CreateTrueMaskInt32([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE
+
+  public static unsafe Vector<long> CreateTrueMaskInt64([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE
+
+  public static unsafe Vector<sbyte> CreateTrueMaskSByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE
+
+  public static unsafe Vector<float> CreateTrueMaskSingle([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE
+
+  public static unsafe Vector<ushort> CreateTrueMaskUInt16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE
+
+  public static unsafe Vector<uint> CreateTrueMaskUInt32([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE
+
+  public static unsafe Vector<ulong> CreateTrueMaskUInt64([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE
+
+  public static unsafe Vector<ushort> CreateWhileLessThanMask16Bit(int left, int right); // WHILELT
+
+  public static unsafe Vector<ushort> CreateWhileLessThanMask16Bit(long left, long right); // WHILELT
+
+  public static unsafe Vector<ushort> CreateWhileLessThanMask16Bit(uint left, uint right); // WHILELO
+
+  public static unsafe Vector<ushort> CreateWhileLessThanMask16Bit(ulong left, ulong right); // WHILELO
+
+  public static unsafe Vector<uint> CreateWhileLessThanMask32Bit(int left, int right); // WHILELT
+
+  public static unsafe Vector<uint> CreateWhileLessThanMask32Bit(long left, long right); // WHILELT
+
+  public static unsafe Vector<uint> CreateWhileLessThanMask32Bit(uint left, uint right); // WHILELO
+
+  public static unsafe Vector<uint> CreateWhileLessThanMask32Bit(ulong left, ulong right); // WHILELO
+
+  public static unsafe Vector<ulong> CreateWhileLessThanMask64Bit(int left, int right); // WHILELT
+
+  public static unsafe Vector<ulong> CreateWhileLessThanMask64Bit(long left, long right); // WHILELT
+
+  public static unsafe Vector<ulong> CreateWhileLessThanMask64Bit(uint left, uint right); // WHILELO
+
+  public static unsafe Vector<ulong> CreateWhileLessThanMask64Bit(ulong left, ulong right); // WHILELO
+
+  public static unsafe Vector<byte> CreateWhileLessThanMask8Bit(int left, int right); // WHILELT
+
+  public static unsafe Vector<byte> CreateWhileLessThanMask8Bit(long left, long right); // WHILELT
+
+  public static unsafe Vector<byte> CreateWhileLessThanMask8Bit(uint left, uint right); // WHILELO
+
+  public static unsafe Vector<byte> CreateWhileLessThanMask8Bit(ulong left, ulong right); // WHILELO
+
+  public static unsafe Vector<ushort> CreateWhileLessThanOrEqualMask16Bit(int left, int right); // WHILELE
+
+  public static unsafe Vector<ushort> CreateWhileLessThanOrEqualMask16Bit(long left, long right); // WHILELE
+
+  public static unsafe Vector<ushort> CreateWhileLessThanOrEqualMask16Bit(uint left, uint right); // WHILELS
+
+  public static unsafe Vector<ushort> CreateWhileLessThanOrEqualMask16Bit(ulong left, ulong right); // WHILELS
+
+  public static unsafe Vector<uint> CreateWhileLessThanOrEqualMask32Bit(int left, int right); // WHILELE
+
+  public static unsafe Vector<uint> CreateWhileLessThanOrEqualMask32Bit(long left, long right); // WHILELE
+
+  public static unsafe Vector<uint> CreateWhileLessThanOrEqualMask32Bit(uint left, uint right); // WHILELS
+
+  public static unsafe Vector<uint> CreateWhileLessThanOrEqualMask32Bit(ulong left, ulong right); // WHILELS
+
+  public static unsafe Vector<ulong> CreateWhileLessThanOrEqualMask64Bit(int left, int right); // WHILELE
+
+  public static unsafe Vector<ulong> CreateWhileLessThanOrEqualMask64Bit(long left, long right); // WHILELE
+
+  public static unsafe Vector<ulong> CreateWhileLessThanOrEqualMask64Bit(uint left, uint right); // WHILELS
+
+  public static unsafe Vector<ulong> CreateWhileLessThanOrEqualMask64Bit(ulong left, ulong right); // WHILELS
+
+  public static unsafe Vector<byte> CreateWhileLessThanOrEqualMask8Bit(int left, int right); // WHILELE
+
+  public static unsafe Vector<byte> CreateWhileLessThanOrEqualMask8Bit(long left, long right); // WHILELE
+
+  public static unsafe Vector<byte> CreateWhileLessThanOrEqualMask8Bit(uint left, uint right); // WHILELS
+
+  public static unsafe Vector<byte> CreateWhileLessThanOrEqualMask8Bit(ulong left, ulong right); // WHILELS
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe T ExtractAfterLastScalar(Vector<T> value); // LASTA // predicated
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> ExtractAfterLastVector(Vector<T> value); // LASTA // predicated
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe T ExtractLastScalar(Vector<T> value); // LASTB // predicated
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> ExtractLastVector(Vector<T> value); // LASTB // predicated
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> ExtractVector(Vector<T> upper, Vector<T> lower, [ConstantExpected] byte index); // EXT // MOVPRFX
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe bool TestAnyTrue(Vector<T> leftMask, Vector<T> rightMask); // PTEST
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe bool TestFirstTrue(Vector<T> leftMask, Vector<T> rightMask); // PTEST
+
+  /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe bool TestLastTrue(Vector<T> leftMask, Vector<T> rightMask); // PTEST
+
+
+  // All patterns used by PTRUE.
+  public enum SveMaskPattern : byte
+  {
+    LargestPowerOf2 = 0,    // The largest power of 2.
+    VectorCount1 = 1,       // 1 element.
+    VectorCount2 = 2,       // 2 elements.
+    VectorCount3 = 3,       // 3 elements.
+    VectorCount4 = 4,       // 4 elements.
+    VectorCount5 = 5,       // 5 elements.
+    VectorCount6 = 6,       // 6 elements.
+    VectorCount7 = 7,       // 7 elements.
+    VectorCount8 = 8,       // 8 elements.
+    VectorCount16 = 9,      // 16 elements.
+    VectorCount32 = 10,     // 32 elements.
+    VectorCount64 = 11,     // 64 elements.
+    VectorCount128 = 12,    // 128 elements.
+    VectorCount256 = 13,    // 256 elements.
+    LargestMultipleOf4 = 29,  // The largest multiple of 4.
+    LargestMultipleOf3 = 30,  // The largest multiple of 3.
+    All = 31                // All available (implicitly a multiple of two).
+  };
+
+  /// total method signatures: 92
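+
+  // [Editor's sketch] A minimal usage example of the mask APIs summarized above.
+  // It assumes the proposed Sve methods exist exactly as declared; nothing here is
+  // shipped API, and the helper name VectorizedMax is hypothetical.
+  //
+  //   static Vector<int> VectorizedMax(Vector<int> a, Vector<int> b)
+  //   {
+  //       // CompareGreaterThan returns a mask vector: active lanes where a > b.
+  //       Vector<int> gt = Sve.CompareGreaterThan(a, b);
+  //       // ConditionalSelect (SEL) takes 'a' where the mask lane is active, else 'b'.
+  //       return Sve.ConditionalSelect(gt, a, b);
+  //   }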
+
+
+  /// Optional Entries:
+
+  /// T: float, double
+  public static unsafe Vector<T> AbsoluteCompareGreaterThan(Vector<T> left, T right); // FACGT // predicated
+
+  /// T: float, double
+  public static unsafe Vector<T> AbsoluteCompareGreaterThanOrEqual(Vector<T> left, T right); // FACGE // predicated
+
+  /// T: float, double
+  public static unsafe Vector<T> AbsoluteCompareLessThan(Vector<T> left, T right); // FACGT // predicated
+
+  /// T: float, double
+  public static unsafe Vector<T> AbsoluteCompareLessThanOrEqual(Vector<T> left, T right); // FACGE // predicated
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> CompareEqual(Vector<T> left, T right); // FCMEQ or CMPEQ // predicated
+
+  /// T: sbyte, short, int
+  public static unsafe Vector<T> CompareEqual(Vector<T> left, long right); // CMPEQ // predicated
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> CompareGreaterThan(Vector<T> left, T right); // FCMGT or CMPGT or CMPHI // predicated
+
+  /// T: sbyte, short, int
+  public static unsafe Vector<T> CompareGreaterThan(Vector<T> left, long right); // CMPGT // predicated
+
+  /// T: byte, ushort, uint
+  public static unsafe Vector<T> CompareGreaterThan(Vector<T> left, ulong right); // CMPHI // predicated
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> CompareGreaterThanOrEqual(Vector<T> left, T right); // FCMGE or CMPGE or CMPHS // predicated
+
+  /// T: sbyte, short, int
+  public static unsafe Vector<T> CompareGreaterThanOrEqual(Vector<T> left, long right); // CMPGE // predicated
+
+  /// T: byte, ushort, uint
+  public static unsafe Vector<T> CompareGreaterThanOrEqual(Vector<T> left, ulong right); // CMPHS // predicated
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> CompareLessThan(Vector<T> left, T right); // FCMLT or FCMGT or CMPLT or CMPGT or CMPLO or CMPHI // predicated
+
+  /// T: sbyte, short, int
+  public static unsafe Vector<T> CompareLessThan(Vector<T> left, long right); // CMPLT // predicated
+
+  /// T: byte, ushort, uint
+  public static unsafe Vector<T> CompareLessThan(Vector<T> left, ulong right); // CMPLO // predicated
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> CompareLessThanOrEqual(Vector<T> left, T right); // FCMLE or FCMGE or CMPLE or CMPGE or CMPLS or CMPHS // predicated
+
+  /// T: sbyte, short, int
+  public static unsafe Vector<T> CompareLessThanOrEqual(Vector<T> left, long right); // CMPLE // predicated
+
+  /// T: byte, ushort, uint
+  public static unsafe Vector<T> CompareLessThanOrEqual(Vector<T> left, ulong right); // CMPLS // predicated
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe Vector<T> CompareNotEqualTo(Vector<T> left, T right); // FCMNE or CMPNE // predicated
+
+  /// T: sbyte, short, int
+  public static unsafe Vector<T> CompareNotEqualTo(Vector<T> left, long right); // CMPNE // predicated
+
+  /// T: float, double
+  public static unsafe Vector<T> CompareUnordered(Vector<T> left, T right); // FCMUO // predicated
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe T ConditionalExtractAfterLastActiveElement(Vector<T> mask, T defaultValue, Vector<T> data); // CLASTA
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe T ConditionalExtractAfterLastActiveElementAndReplicate(Vector<T> mask, T defaultScalar, Vector<T> data); // CLASTA
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe T ConditionalExtractLastActiveElement(Vector<T> mask, T defaultValue, Vector<T> data); // CLASTB
+
+  /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+  public static unsafe T ConditionalExtractLastActiveElementAndReplicate(Vector<T> mask, T fallback, Vector<T> data); // CLASTB
+
+  /// total optional method signatures: 25
+
+}
+
+
+/// Full API
+public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE  Category: mask
+{
+  /// AbsoluteCompareGreaterThan : Absolute compare greater than
+
+  /// svbool_t svacgt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FACGT Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<float> AbsoluteCompareGreaterThan(Vector<float> left, Vector<float> right);
+
+  /// svbool_t svacgt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FACGT Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<double> AbsoluteCompareGreaterThan(Vector<double> left, Vector<double> right);
+
+
+  /// AbsoluteCompareGreaterThanOrEqual : Absolute compare greater than or equal to
+
+  /// svbool_t svacge[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FACGE Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<float> AbsoluteCompareGreaterThanOrEqual(Vector<float> left, Vector<float> right);
+
+  /// svbool_t svacge[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FACGE Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<double> AbsoluteCompareGreaterThanOrEqual(Vector<double> left, Vector<double> right);
+
+
+  /// AbsoluteCompareLessThan : Absolute compare less than
+
+  /// svbool_t svaclt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FACGT Presult.S, Pg/Z, Zop2.S, Zop1.S"
+  public static unsafe Vector<float> AbsoluteCompareLessThan(Vector<float> left, Vector<float> right);
+
+  /// svbool_t svaclt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FACGT Presult.D, Pg/Z, Zop2.D, Zop1.D"
+  public static unsafe Vector<double> AbsoluteCompareLessThan(Vector<double> left, Vector<double> right);
+
+
+  /// AbsoluteCompareLessThanOrEqual : Absolute compare less than or equal to
+
+  /// svbool_t svacle[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FACGE Presult.S, Pg/Z, Zop2.S, Zop1.S"
+  public static unsafe Vector<float> AbsoluteCompareLessThanOrEqual(Vector<float> left, Vector<float> right);
+
+  /// svbool_t svacle[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FACGE Presult.D, Pg/Z, Zop2.D, Zop1.D"
+  public static unsafe Vector<double> AbsoluteCompareLessThanOrEqual(Vector<double> left, Vector<double> right);
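+
+  // [Editor's sketch] Absolute compares operate on magnitudes, i.e. each lane
+  // computes |left| <op> |right| (the "less than" forms are the same instruction with
+  // swapped operands). Hypothetical usage, assuming the proposed API above:
+  //
+  //   // Mask of lanes where |x| exceeds the per-lane bound, in one FACGT.
+  //   Vector<float> outOfRange = Sve.AbsoluteCompareGreaterThan(x, bound);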
+
+
+  /// Compact : Shuffle active elements of vector to the right and fill with zero
+
+  /// svfloat32_t svcompact[_f32](svbool_t pg, svfloat32_t op) : "COMPACT Zresult.S, Pg, Zop.S"
+  public static unsafe Vector<float> Compact(Vector<float> mask, Vector<float> value);
+
+  /// svfloat64_t svcompact[_f64](svbool_t pg, svfloat64_t op) : "COMPACT Zresult.D, Pg, Zop.D"
+  public static unsafe Vector<double> Compact(Vector<double> mask, Vector<double> value);
+
+  /// svint32_t svcompact[_s32](svbool_t pg, svint32_t op) : "COMPACT Zresult.S, Pg, Zop.S"
+  public static unsafe Vector<int> Compact(Vector<int> mask, Vector<int> value);
+
+  /// svint64_t svcompact[_s64](svbool_t pg, svint64_t op) : "COMPACT Zresult.D, Pg, Zop.D"
+  public static unsafe Vector<long> Compact(Vector<long> mask, Vector<long> value);
+
+  /// svuint32_t svcompact[_u32](svbool_t pg, svuint32_t op) : "COMPACT Zresult.S, Pg, Zop.S"
+  public static unsafe Vector<uint> Compact(Vector<uint> mask, Vector<uint> value);
+
+  /// svuint64_t svcompact[_u64](svbool_t pg, svuint64_t op) : "COMPACT Zresult.D, Pg, Zop.D"
+  public static unsafe Vector<ulong> Compact(Vector<ulong> mask, Vector<ulong> value);
+
+
+  /// CompareEqual : Compare equal to
+
+  /// svbool_t svcmpeq[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FCMEQ Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<float> CompareEqual(Vector<float> left, Vector<float> right);
+
+  /// svbool_t svcmpeq[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FCMEQ Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<double> CompareEqual(Vector<double> left, Vector<double> right);
+
+  /// svbool_t svcmpeq[_s8](svbool_t pg, svint8_t op1, svint8_t op2) : "CMPEQ Presult.B, Pg/Z, Zop1.B, Zop2.B"
+  public static unsafe Vector<sbyte> CompareEqual(Vector<sbyte> left, Vector<sbyte> right);
+
+  /// svbool_t svcmpeq[_s16](svbool_t pg, svint16_t op1, svint16_t op2) : "CMPEQ Presult.H, Pg/Z, Zop1.H, Zop2.H"
+  public static unsafe Vector<short> CompareEqual(Vector<short> left, Vector<short> right);
+
+  /// svbool_t svcmpeq[_s32](svbool_t pg, svint32_t op1, svint32_t op2) : "CMPEQ Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<int> CompareEqual(Vector<int> left, Vector<int> right);
+
+  /// svbool_t svcmpeq[_s64](svbool_t pg, svint64_t op1, svint64_t op2) : "CMPEQ Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<long> CompareEqual(Vector<long> left, Vector<long> right);
+
+  /// svbool_t svcmpeq[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) : "CMPEQ Presult.B, Pg/Z, Zop1.B, Zop2.B"
+  public static unsafe Vector<byte> CompareEqual(Vector<byte> left, Vector<byte> right);
+
+  /// svbool_t svcmpeq[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) : "CMPEQ Presult.H, Pg/Z, Zop1.H, Zop2.H"
+  public static unsafe Vector<ushort> CompareEqual(Vector<ushort> left, Vector<ushort> right);
+
+  /// svbool_t svcmpeq[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) : "CMPEQ Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<uint> CompareEqual(Vector<uint> left, Vector<uint> right);
+
+  /// svbool_t svcmpeq[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) : "CMPEQ Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<ulong> CompareEqual(Vector<ulong> left, Vector<ulong> right);
+
+  /// svbool_t svcmpeq_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) : "CMPEQ Presult.B, Pg/Z, Zop1.B, Zop2.D"
+  public static unsafe Vector<sbyte> CompareEqual(Vector<sbyte> left, Vector<long> right);
+
+  /// svbool_t svcmpeq_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) : "CMPEQ Presult.H, Pg/Z, Zop1.H, Zop2.D"
+  public static unsafe Vector<short> CompareEqual(Vector<short> left, Vector<long> right);
+
+  /// svbool_t svcmpeq_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) : "CMPEQ Presult.S, Pg/Z, Zop1.S, Zop2.D"
+  public static unsafe Vector<int> CompareEqual(Vector<int> left, Vector<long> right);
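+
+  // [Editor's sketch] The _wide overloads compare each narrow lane against the
+  // 64-bit lanes of 'right' (e.g. "CMPEQ Zop1.S, Zop2.D"), so a Vector<int> can be
+  // matched against 64-bit keys without widening it first. Hypothetical usage,
+  // assuming the proposed API above ('values' is Vector<int>, 'keys' is Vector<long>):
+  //
+  //   Vector<int> eq = Sve.CompareEqual(values, keys); // mask in the narrow element type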
+
+
+  /// CompareGreaterThan : Compare greater than
+
+  /// svbool_t svcmpgt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FCMGT Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<float> CompareGreaterThan(Vector<float> left, Vector<float> right);
+
+  /// svbool_t svcmpgt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FCMGT Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<double> CompareGreaterThan(Vector<double> left, Vector<double> right);
+
+  /// svbool_t svcmpgt[_s8](svbool_t pg, svint8_t op1, svint8_t op2) : "CMPGT Presult.B, Pg/Z, Zop1.B, Zop2.B"
+  public static unsafe Vector<sbyte> CompareGreaterThan(Vector<sbyte> left, Vector<sbyte> right);
+
+  /// svbool_t svcmpgt[_s16](svbool_t pg, svint16_t op1, svint16_t op2) : "CMPGT Presult.H, Pg/Z, Zop1.H, Zop2.H"
+  public static unsafe Vector<short> CompareGreaterThan(Vector<short> left, Vector<short> right);
+
+  /// svbool_t svcmpgt[_s32](svbool_t pg, svint32_t op1, svint32_t op2) : "CMPGT Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<int> CompareGreaterThan(Vector<int> left, Vector<int> right);
+
+  /// svbool_t svcmpgt[_s64](svbool_t pg, svint64_t op1, svint64_t op2) : "CMPGT Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<long> CompareGreaterThan(Vector<long> left, Vector<long> right);
+
+  /// svbool_t svcmpgt[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) : "CMPHI Presult.B, Pg/Z, Zop1.B, Zop2.B"
+  public static unsafe Vector<byte> CompareGreaterThan(Vector<byte> left, Vector<byte> right);
+
+  /// svbool_t svcmpgt[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) : "CMPHI Presult.H, Pg/Z, Zop1.H, Zop2.H"
+  public static unsafe Vector<ushort> CompareGreaterThan(Vector<ushort> left, Vector<ushort> right);
+
+  /// svbool_t svcmpgt[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) : "CMPHI Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<uint> CompareGreaterThan(Vector<uint> left, Vector<uint> right);
+
+  /// svbool_t svcmpgt[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) : "CMPHI Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<ulong> CompareGreaterThan(Vector<ulong> left, Vector<ulong> right);
+
+  /// svbool_t svcmpgt_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) : "CMPGT Presult.B, Pg/Z, Zop1.B, Zop2.D"
+  public static unsafe Vector<sbyte> CompareGreaterThan(Vector<sbyte> left, Vector<long> right);
+
+  /// svbool_t svcmpgt_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) : "CMPGT Presult.H, Pg/Z, Zop1.H, Zop2.D"
+  public static unsafe Vector<short> CompareGreaterThan(Vector<short> left, Vector<long> right);
+
+  /// svbool_t svcmpgt_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) : "CMPGT Presult.S, Pg/Z, Zop1.S, Zop2.D"
+  public static unsafe Vector<int> CompareGreaterThan(Vector<int> left, Vector<long> right);
+
+  /// svbool_t svcmpgt_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2) : "CMPHI Presult.B, Pg/Z, Zop1.B, Zop2.D"
+  public static unsafe Vector<byte> CompareGreaterThan(Vector<byte> left, Vector<ulong> right);
+
+  /// svbool_t svcmpgt_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2) : "CMPHI Presult.H, Pg/Z, Zop1.H, Zop2.D"
+  public static unsafe Vector<ushort> CompareGreaterThan(Vector<ushort> left, Vector<ulong> right);
+
+  /// svbool_t svcmpgt_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2) : "CMPHI Presult.S, Pg/Z, Zop1.S, Zop2.D"
+  public static unsafe Vector<uint> CompareGreaterThan(Vector<uint> left, Vector<ulong> right);
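+
+  // [Editor's sketch] The same CompareGreaterThan name resolves to a signed (CMPGT)
+  // or unsigned (CMPHI) compare depending on the element type, so identical bit
+  // patterns can produce different lane results. Hypothetical usage, assuming the
+  // proposed API above:
+  //
+  //   Vector<sbyte> s = Sve.CompareGreaterThan(sa, sb); // CMPGT: 0x80 lane is -128
+  //   Vector<byte>  u = Sve.CompareGreaterThan(ua, ub); // CMPHI: 0x80 lane is 128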
+
+
+  /// CompareGreaterThanOrEqual : Compare greater than or equal to
+
+  /// svbool_t svcmpge[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FCMGE Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<float> CompareGreaterThanOrEqual(Vector<float> left, Vector<float> right);
+
+  /// svbool_t svcmpge[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FCMGE Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<double> CompareGreaterThanOrEqual(Vector<double> left, Vector<double> right);
+
+  /// svbool_t svcmpge[_s8](svbool_t pg, svint8_t op1, svint8_t op2) : "CMPGE Presult.B, Pg/Z, Zop1.B, Zop2.B"
+  public static unsafe Vector<sbyte> CompareGreaterThanOrEqual(Vector<sbyte> left, Vector<sbyte> right);
+
+  /// svbool_t svcmpge[_s16](svbool_t pg, svint16_t op1, svint16_t op2) : "CMPGE Presult.H, Pg/Z, Zop1.H, Zop2.H"
+  public static unsafe Vector<short> CompareGreaterThanOrEqual(Vector<short> left, Vector<short> right);
+
+  /// svbool_t svcmpge[_s32](svbool_t pg, svint32_t op1, svint32_t op2) : "CMPGE Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<int> CompareGreaterThanOrEqual(Vector<int> left, Vector<int> right);
+
+  /// svbool_t svcmpge[_s64](svbool_t pg, svint64_t op1, svint64_t op2) : "CMPGE Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<long> CompareGreaterThanOrEqual(Vector<long> left, Vector<long> right);
+
+  /// svbool_t svcmpge[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) : "CMPHS Presult.B, Pg/Z, Zop1.B, Zop2.B"
+  public static unsafe Vector<byte> CompareGreaterThanOrEqual(Vector<byte> left, Vector<byte> right);
+
+  /// svbool_t svcmpge[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) : "CMPHS Presult.H, Pg/Z, Zop1.H, Zop2.H"
+  public static unsafe Vector<ushort> CompareGreaterThanOrEqual(Vector<ushort> left, Vector<ushort> right);
+
+  /// svbool_t svcmpge[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) : "CMPHS Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<uint> CompareGreaterThanOrEqual(Vector<uint> left, Vector<uint> right);
+
+  /// svbool_t svcmpge[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) : "CMPHS Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<ulong> CompareGreaterThanOrEqual(Vector<ulong> left, Vector<ulong> right);
+
+  /// svbool_t svcmpge_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) : "CMPGE Presult.B, Pg/Z, Zop1.B, Zop2.D"
+  public static unsafe Vector<sbyte> CompareGreaterThanOrEqual(Vector<sbyte> left, Vector<long> right);
+
+  /// svbool_t svcmpge_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) : "CMPGE Presult.H, Pg/Z, Zop1.H, Zop2.D"
+  public static unsafe Vector<short> CompareGreaterThanOrEqual(Vector<short> left, Vector<long> right);
+
+  /// svbool_t svcmpge_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) : "CMPGE Presult.S, Pg/Z, Zop1.S, Zop2.D"
+  public static unsafe Vector<int> CompareGreaterThanOrEqual(Vector<int> left, Vector<long> right);
+
+  /// svbool_t svcmpge_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2) : "CMPHS Presult.B, Pg/Z, Zop1.B, Zop2.D"
+  public static unsafe Vector<byte> CompareGreaterThanOrEqual(Vector<byte> left, Vector<ulong> right);
+
+  /// svbool_t svcmpge_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2) : "CMPHS Presult.H, Pg/Z, Zop1.H, Zop2.D"
+  public static unsafe Vector<ushort> CompareGreaterThanOrEqual(Vector<ushort> left, Vector<ulong> right);
+
+  /// svbool_t svcmpge_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2) : "CMPHS Presult.S, Pg/Z, Zop1.S, Zop2.D"
+  public static unsafe Vector<uint> CompareGreaterThanOrEqual(Vector<uint> left, Vector<ulong> right);
+
+
+  /// CompareLessThan : Compare less than
+
+  /// svbool_t svcmplt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FCMGT Presult.S, Pg/Z, Zop2.S, Zop1.S"
+  public static unsafe Vector<float> CompareLessThan(Vector<float> left, Vector<float> right);
+
+  /// svbool_t svcmplt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FCMGT Presult.D, Pg/Z, Zop2.D, Zop1.D"
+  public static unsafe Vector<double> CompareLessThan(Vector<double> left, Vector<double> right);
+
+  /// svbool_t svcmplt[_s8](svbool_t pg, svint8_t op1, svint8_t op2) : "CMPGT Presult.B, Pg/Z, Zop2.B, Zop1.B"
+  public static unsafe Vector<sbyte> CompareLessThan(Vector<sbyte> left, Vector<sbyte> right);
+
+  /// svbool_t svcmplt[_s16](svbool_t pg, svint16_t op1, svint16_t op2) : "CMPGT Presult.H, Pg/Z, Zop2.H, Zop1.H"
+  public static unsafe Vector<short> CompareLessThan(Vector<short> left, Vector<short> right);
+
+  /// svbool_t svcmplt[_s32](svbool_t pg, svint32_t op1, svint32_t op2) : "CMPGT Presult.S, Pg/Z, Zop2.S, Zop1.S"
+  public static unsafe Vector<int> CompareLessThan(Vector<int> left, Vector<int> right);
+
+  /// svbool_t svcmplt[_s64](svbool_t pg, svint64_t op1, svint64_t op2) : "CMPGT Presult.D, Pg/Z, Zop2.D, Zop1.D"
+  public static unsafe Vector<long> CompareLessThan(Vector<long> left, Vector<long> right);
+
+  /// svbool_t svcmplt[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) : "CMPHI Presult.B, Pg/Z, Zop2.B, Zop1.B"
+  public static unsafe Vector<byte> CompareLessThan(Vector<byte> left, Vector<byte> right);
+
+  /// svbool_t svcmplt[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) : "CMPHI Presult.H, Pg/Z, Zop2.H, Zop1.H"
+  public static unsafe Vector<ushort> CompareLessThan(Vector<ushort> left, Vector<ushort> right);
+
+  /// svbool_t svcmplt[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) : "CMPHI Presult.S, Pg/Z, Zop2.S, Zop1.S"
+  public static unsafe Vector<uint> CompareLessThan(Vector<uint> left, Vector<uint> right);
+
+  /// svbool_t svcmplt[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) : "CMPHI Presult.D, Pg/Z, Zop2.D, Zop1.D"
+  public static unsafe Vector<ulong> CompareLessThan(Vector<ulong> left, Vector<ulong> right);
+
+  /// svbool_t svcmplt_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) : "CMPLT Presult.B, Pg/Z, Zop1.B, Zop2.D"
+  public static unsafe Vector<sbyte> CompareLessThan(Vector<sbyte> left, Vector<long> right);
+
+  /// svbool_t svcmplt_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) : "CMPLT Presult.H, Pg/Z, Zop1.H, Zop2.D"
+  public static unsafe Vector<short> CompareLessThan(Vector<short> left, Vector<long> right);
+
+  /// svbool_t svcmplt_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) : "CMPLT Presult.S, Pg/Z, Zop1.S, Zop2.D"
+  public static unsafe Vector<int> CompareLessThan(Vector<int> left, Vector<long> right);
+
+  /// svbool_t svcmplt_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2) : "CMPLO Presult.B, Pg/Z, Zop1.B, Zop2.D"
+  public static unsafe Vector<byte> CompareLessThan(Vector<byte> left, Vector<ulong> right);
+
+  /// svbool_t svcmplt_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2) : "CMPLO Presult.H, Pg/Z, Zop1.H, Zop2.D"
+  public static unsafe Vector<ushort> CompareLessThan(Vector<ushort> left, Vector<ulong> right);
+
+  /// svbool_t svcmplt_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2) : "CMPLO Presult.S, Pg/Z, Zop1.S, Zop2.D"
+  public static unsafe Vector<uint> CompareLessThan(Vector<uint> left, Vector<ulong> right);
+
+
+  /// CompareLessThanOrEqual : Compare less than or equal to
+
+  /// svbool_t svcmple[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FCMGE Presult.S, Pg/Z, Zop2.S, Zop1.S"
+  public static unsafe Vector<float> CompareLessThanOrEqual(Vector<float> left, Vector<float> right);
+
+  /// svbool_t svcmple[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FCMGE Presult.D, Pg/Z, Zop2.D, Zop1.D"
+  public static unsafe Vector<double> CompareLessThanOrEqual(Vector<double> left, Vector<double> right);
+
+  /// svbool_t svcmple[_s8](svbool_t pg, svint8_t op1, svint8_t op2) : "CMPGE Presult.B, Pg/Z, Zop2.B, Zop1.B"
+  public static unsafe Vector<sbyte> CompareLessThanOrEqual(Vector<sbyte> left, Vector<sbyte> right);
+
+  /// svbool_t svcmple[_s16](svbool_t pg, svint16_t op1, svint16_t op2) : "CMPGE Presult.H, Pg/Z, Zop2.H, Zop1.H"
+  public static unsafe Vector<short> CompareLessThanOrEqual(Vector<short> left, Vector<short> right);
+
+  /// svbool_t svcmple[_s32](svbool_t pg, svint32_t op1, svint32_t op2) : "CMPGE Presult.S, Pg/Z, Zop2.S, Zop1.S"
+  public static unsafe Vector<int> CompareLessThanOrEqual(Vector<int> left, Vector<int> right);
+
+  /// svbool_t svcmple[_s64](svbool_t pg, svint64_t op1, svint64_t op2) : "CMPGE Presult.D, Pg/Z, Zop2.D, Zop1.D"
+  public static unsafe Vector<long> CompareLessThanOrEqual(Vector<long> left, Vector<long> right);
+
+  /// svbool_t svcmple[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) : "CMPHS Presult.B, Pg/Z, Zop2.B, Zop1.B"
+  public static unsafe Vector<byte> CompareLessThanOrEqual(Vector<byte> left, Vector<byte> right);
+
+  /// svbool_t svcmple[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) : "CMPHS Presult.H, Pg/Z, Zop2.H, Zop1.H"
+  public static unsafe Vector<ushort> CompareLessThanOrEqual(Vector<ushort> left, Vector<ushort> right);
+
+  /// svbool_t svcmple[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) : "CMPHS Presult.S, Pg/Z, Zop2.S, Zop1.S"
+  public static unsafe Vector<uint> CompareLessThanOrEqual(Vector<uint> left, Vector<uint> right);
+
+  /// svbool_t svcmple[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) : "CMPHS Presult.D, Pg/Z, Zop2.D, Zop1.D"
+  public static unsafe Vector<ulong> CompareLessThanOrEqual(Vector<ulong> left, Vector<ulong> right);
+
+  /// svbool_t svcmple_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) : "CMPLE Presult.B, Pg/Z, Zop1.B, Zop2.D"
+  public static unsafe Vector<sbyte> CompareLessThanOrEqual(Vector<sbyte> left, Vector<long> right);
+
+  /// svbool_t svcmple_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) : "CMPLE Presult.H, Pg/Z, Zop1.H, Zop2.D"
+  public static unsafe Vector<short> CompareLessThanOrEqual(Vector<short> left, Vector<long> right);
+
+  /// svbool_t svcmple_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) : "CMPLE Presult.S, Pg/Z, Zop1.S, Zop2.D"
+  public static unsafe Vector<int> CompareLessThanOrEqual(Vector<int> left, Vector<long> right);
+
+  /// svbool_t svcmple_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2) : "CMPLS Presult.B, Pg/Z, Zop1.B, Zop2.D"
+  public static unsafe Vector<byte> CompareLessThanOrEqual(Vector<byte> left, Vector<ulong> right);
+
+  /// svbool_t svcmple_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2) : "CMPLS Presult.H, Pg/Z, Zop1.H, Zop2.D"
+  public static unsafe Vector<ushort> CompareLessThanOrEqual(Vector<ushort> left, Vector<ulong> right);
+
+  /// svbool_t svcmple_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2) : "CMPLS Presult.S, Pg/Z, Zop1.S, Zop2.D"
+  public static unsafe Vector<uint> CompareLessThanOrEqual(Vector<uint> left, Vector<ulong> right);
+
+
+  /// CompareNotEqualTo : Compare not equal to
+
+  /// svbool_t svcmpne[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FCMNE Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<float> CompareNotEqualTo(Vector<float> left, Vector<float> right);
+
+  /// svbool_t svcmpne[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FCMNE Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<double> CompareNotEqualTo(Vector<double> left, Vector<double> right);
+
+  /// svbool_t svcmpne[_s8](svbool_t pg, svint8_t op1, svint8_t op2) : "CMPNE Presult.B, Pg/Z, Zop1.B, Zop2.B"
+  public static unsafe Vector<sbyte> CompareNotEqualTo(Vector<sbyte> left, Vector<sbyte> right);
+
+  /// svbool_t svcmpne[_s16](svbool_t pg, svint16_t op1, svint16_t op2) : "CMPNE Presult.H, Pg/Z, Zop1.H, Zop2.H"
+  public static unsafe Vector<short> CompareNotEqualTo(Vector<short> left, Vector<short> right);
+
+  /// svbool_t svcmpne[_s32](svbool_t pg, svint32_t op1, svint32_t op2) : "CMPNE Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<int> CompareNotEqualTo(Vector<int> left, Vector<int> right);
+
+  /// svbool_t svcmpne[_s64](svbool_t pg, svint64_t op1, svint64_t op2) : "CMPNE Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<long> CompareNotEqualTo(Vector<long> left, Vector<long> right);
+
+  /// svbool_t svcmpne[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) : "CMPNE Presult.B, Pg/Z, Zop1.B, Zop2.B"
+  public static unsafe Vector<byte> CompareNotEqualTo(Vector<byte> left, Vector<byte> right);
+
+  /// svbool_t svcmpne[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) : "CMPNE Presult.H, Pg/Z, Zop1.H, Zop2.H"
+  public static unsafe Vector<ushort> CompareNotEqualTo(Vector<ushort> left, Vector<ushort> right);
+
+  /// svbool_t svcmpne[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) : "CMPNE Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<uint> CompareNotEqualTo(Vector<uint> left, Vector<uint> right);
+
+  /// svbool_t svcmpne[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) : "CMPNE Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<ulong> CompareNotEqualTo(Vector<ulong> left, Vector<ulong> right);
+
+  /// svbool_t svcmpne_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) : "CMPNE Presult.B, Pg/Z, Zop1.B, Zop2.D"
+  public static unsafe Vector<sbyte> CompareNotEqualTo(Vector<sbyte> left, Vector<long> right);
+
+  /// svbool_t svcmpne_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) : "CMPNE Presult.H, Pg/Z, Zop1.H, Zop2.D"
+  public static unsafe Vector<short> CompareNotEqualTo(Vector<short> left, Vector<long> right);
+
+  /// svbool_t svcmpne_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) : "CMPNE Presult.S, Pg/Z, Zop1.S, Zop2.D"
+  public static unsafe Vector<int> CompareNotEqualTo(Vector<int> left, Vector<long> right);
+
+
+  /// CompareUnordered : Compare unordered with
+
+  /// svbool_t svcmpuo[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FCMUO Presult.S, Pg/Z, Zop1.S, Zop2.S"
+  public static unsafe Vector<float> CompareUnordered(Vector<float> left, Vector<float> right);
+
+  /// svbool_t svcmpuo[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FCMUO Presult.D, Pg/Z, Zop1.D, Zop2.D"
+  public static unsafe Vector<double> CompareUnordered(Vector<double> left, Vector<double> right);
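+
+  // [Editor's sketch] CompareUnordered sets a lane when either input lane is NaN,
+  // so comparing a vector with itself isolates NaN lanes. Hypothetical usage,
+  // assuming the proposed API above:
+  //
+  //   Vector<double> isNaN = Sve.CompareUnordered(x, x); // FCMUO: lanes where x is NaN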
or "CLASTA Dtied, Pg, Dtied, Zdata.D" + public static unsafe double ConditionalExtractAfterLastActiveElement(Vector mask, double defaultValues, Vector data); + + /// svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data) : "CLASTA Ztied.B, Pg, Ztied.B, Zdata.B" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.B, Pg, Zresult.B, Zdata.B" + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data); + + /// svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data) : "CLASTA Ztied.B, Pg, Ztied.B, Zdata.B" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.B, Pg, Zresult.B, Zdata.B" + /// int8_t svclasta[_n_s8](svbool_t pg, int8_t fallback, svint8_t data) : "CLASTA Wtied, Pg, Wtied, Zdata.B" or "CLASTA Btied, Pg, Btied, Zdata.B" + public static unsafe sbyte ConditionalExtractAfterLastActiveElement(Vector mask, sbyte defaultValues, Vector data); + + /// svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data) : "CLASTA Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H" + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data); + + /// svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data) : "CLASTA Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H" + /// int16_t svclasta[_n_s16](svbool_t pg, int16_t fallback, svint16_t data) : "CLASTA Wtied, Pg, Wtied, Zdata.H" or "CLASTA Htied, Pg, Htied, Zdata.H" + public static unsafe short ConditionalExtractAfterLastActiveElement(Vector mask, short defaultValues, Vector data); + + /// svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data) : "CLASTA Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S" + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data); + + /// svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data) : "CLASTA Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S" + /// int32_t svclasta[_n_s32](svbool_t pg, int32_t fallback, svint32_t data) : "CLASTA Wtied, Pg, Wtied, Zdata.S" or "CLASTA Stied, Pg, Stied, Zdata.S" + public static unsafe int ConditionalExtractAfterLastActiveElement(Vector mask, int defaultValues, Vector data); + + /// svint64_t svclasta[_s64](svbool_t pg, svint64_t fallback, svint64_t data) : "CLASTA Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D" + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data); + + /// svint64_t svclasta[_s64](svbool_t pg, svint64_t fallback, svint64_t data) : "CLASTA Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D" + /// int64_t svclasta[_n_s64](svbool_t pg, int64_t fallback, svint64_t data) : "CLASTA Xtied, Pg, Xtied, Zdata.D" or "CLASTA Dtied, Pg, Dtied, Zdata.D" + public static unsafe long ConditionalExtractAfterLastActiveElement(Vector mask, long defaultValues, Vector data); + + /// svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data) : "CLASTA Ztied.B, Pg, Ztied.B, Zdata.B" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.B, Pg, Zresult.B, Zdata.B" + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector 
+
+  /// svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data) : "CLASTA Ztied.B, Pg, Ztied.B, Zdata.B" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.B, Pg, Zresult.B, Zdata.B"
+  public static unsafe Vector<byte> ConditionalExtractAfterLastActiveElement(Vector<byte> mask, Vector<byte> defaultValue, Vector<byte> data);
+
+  /// svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data) : "CLASTA Ztied.B, Pg, Ztied.B, Zdata.B" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.B, Pg, Zresult.B, Zdata.B"
+  /// uint8_t svclasta[_n_u8](svbool_t pg, uint8_t fallback, svuint8_t data) : "CLASTA Wtied, Pg, Wtied, Zdata.B" or "CLASTA Btied, Pg, Btied, Zdata.B"
+  public static unsafe byte ConditionalExtractAfterLastActiveElement(Vector<byte> mask, byte defaultValues, Vector<byte> data);
+
+  /// svuint16_t svclasta[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data) : "CLASTA Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H"
+  public static unsafe Vector<ushort> ConditionalExtractAfterLastActiveElement(Vector<ushort> mask, Vector<ushort> defaultValue, Vector<ushort> data);
+
+  /// svuint16_t svclasta[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data) : "CLASTA Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H"
+  /// uint16_t svclasta[_n_u16](svbool_t pg, uint16_t fallback, svuint16_t data) : "CLASTA Wtied, Pg, Wtied, Zdata.H" or "CLASTA Htied, Pg, Htied, Zdata.H"
+  public static unsafe ushort ConditionalExtractAfterLastActiveElement(Vector<ushort> mask, ushort defaultValues, Vector<ushort> data);
+
+  /// svuint32_t svclasta[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data) : "CLASTA Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S"
+  public static unsafe Vector<uint> ConditionalExtractAfterLastActiveElement(Vector<uint> mask, Vector<uint> defaultValue, Vector<uint> data);
+
+  /// svuint32_t svclasta[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data) : "CLASTA Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S"
+  /// uint32_t svclasta[_n_u32](svbool_t pg, uint32_t fallback, svuint32_t data) : "CLASTA Wtied, Pg, Wtied, Zdata.S" or "CLASTA Stied, Pg, Stied, Zdata.S"
+  public static unsafe uint ConditionalExtractAfterLastActiveElement(Vector<uint> mask, uint defaultValues, Vector<uint> data);
+
+  /// svuint64_t svclasta[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data) : "CLASTA Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D"
+  public static unsafe Vector<ulong> ConditionalExtractAfterLastActiveElement(Vector<ulong> mask, Vector<ulong> defaultValue, Vector<ulong> data);
+
+  /// svuint64_t svclasta[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data) : "CLASTA Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D"
+  /// uint64_t svclasta[_n_u64](svbool_t pg, uint64_t fallback, svuint64_t data) : "CLASTA Xtied, Pg, Xtied, Zdata.D" or "CLASTA Dtied, Pg, Dtied, Zdata.D"
+  public static unsafe ulong ConditionalExtractAfterLastActiveElement(Vector<ulong> mask, ulong defaultValues, Vector<ulong> data);
+
+
+  /// ConditionalExtractAfterLastActiveElementAndReplicate : Conditionally extract element after last
+
+  /// svfloat32_t svclasta[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data) : "CLASTA Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S"
+  public static unsafe Vector<float> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<float> mask, Vector<float> defaultScalar, Vector<float> data);
+
+  /// svfloat64_t svclasta[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data) : "CLASTA Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D"
+  public static unsafe Vector<double> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<double> mask, Vector<double> defaultScalar, Vector<double> data);
+
+  /// svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data) : "CLASTA Ztied.B, Pg, Ztied.B, Zdata.B" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.B, Pg, Zresult.B, Zdata.B"
+  public static unsafe Vector<sbyte> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<sbyte> mask, Vector<sbyte> defaultScalar, Vector<sbyte> data);
+
+  /// svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data) : "CLASTA Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H"
+  public static unsafe Vector<short> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<short> mask, Vector<short> defaultScalar, Vector<short> data);
+
+  /// svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data) : "CLASTA Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S"
+  public static unsafe Vector<int> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<int> mask, Vector<int> defaultScalar, Vector<int> data);
+
+  /// svint64_t svclasta[_s64](svbool_t pg, svint64_t fallback, svint64_t data) : "CLASTA Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D"
+  public static unsafe Vector<long> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<long> mask, Vector<long> defaultScalar, Vector<long> data);
+
+  /// svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data) : "CLASTA Ztied.B, Pg, Ztied.B, Zdata.B" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.B, Pg, Zresult.B, Zdata.B"
+  public static unsafe Vector<byte> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<byte> mask, Vector<byte> defaultScalar, Vector<byte> data);
+
+  /// svuint16_t svclasta[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data) : "CLASTA Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H"
+  public static unsafe Vector<ushort> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<ushort> mask, Vector<ushort> defaultScalar, Vector<ushort> data);
+
+  /// svuint32_t svclasta[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data) : "CLASTA Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S"
+  public static unsafe Vector<uint> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<uint> mask, Vector<uint> defaultScalar, Vector<uint> data);
+
+  /// svuint64_t svclasta[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data) : "CLASTA Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D"
+  public static unsafe Vector<ulong> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<ulong> mask, Vector<ulong> defaultScalar, Vector<ulong> data);
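+
+  // [Editor's sketch] CLASTA reads the element *after* the last active one, which is
+  // how speculative SVE loops carry the first element of the next iteration across
+  // the loop boundary; the fallback is returned when no lane is active. Hypothetical
+  // usage, assuming the proposed API above:
+  //
+  //   int carry = Sve.ConditionalExtractAfterLastActiveElement(mask, previousCarry, data);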
+
+
+  /// ConditionalExtractLastActiveElement : Conditionally extract last element
+
+  /// svfloat32_t svclastb[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data) : "CLASTB Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S"
+  public static unsafe Vector<float> ConditionalExtractLastActiveElement(Vector<float> mask, Vector<float> defaultValue, Vector<float> data);
+
+  /// svfloat32_t svclastb[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data) : "CLASTB Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S"
+  /// float32_t svclastb[_n_f32](svbool_t pg, float32_t fallback, svfloat32_t data) : "CLASTB Wtied, Pg, Wtied, Zdata.S" or "CLASTB Stied, Pg, Stied, Zdata.S"
+  public static unsafe float ConditionalExtractLastActiveElement(Vector<float> mask, float defaultValues, Vector<float> data);
+
+  /// svfloat64_t svclastb[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data) : "CLASTB Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D"
+  public static unsafe Vector<double> ConditionalExtractLastActiveElement(Vector<double> mask, Vector<double> defaultValue, Vector<double> data);
+
+  /// svfloat64_t svclastb[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data) : "CLASTB Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D"
+  /// float64_t svclastb[_n_f64](svbool_t pg, float64_t fallback, svfloat64_t data) : "CLASTB Xtied, Pg, Xtied, Zdata.D" or "CLASTB Dtied, Pg, Dtied, Zdata.D"
+  public static unsafe double ConditionalExtractLastActiveElement(Vector<double> mask, double defaultValues, Vector<double> data);
+
+  /// svint8_t svclastb[_s8](svbool_t pg, svint8_t fallback, svint8_t data) : "CLASTB Ztied.B, Pg, Ztied.B, Zdata.B" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.B, Pg, Zresult.B, Zdata.B"
+  public static unsafe Vector<sbyte> ConditionalExtractLastActiveElement(Vector<sbyte> mask, Vector<sbyte> defaultValue, Vector<sbyte> data);
+
+  /// svint8_t svclastb[_s8](svbool_t pg, svint8_t fallback, svint8_t data) : "CLASTB Ztied.B, Pg, Ztied.B, Zdata.B" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.B, Pg, Zresult.B, Zdata.B"
+  /// int8_t svclastb[_n_s8](svbool_t pg, int8_t fallback, svint8_t data) : "CLASTB Wtied, Pg, Wtied, Zdata.B" or "CLASTB Btied, Pg, Btied, Zdata.B"
+  public static unsafe sbyte ConditionalExtractLastActiveElement(Vector<sbyte> mask, sbyte defaultValues, Vector<sbyte> data);
+
+  /// svint16_t svclastb[_s16](svbool_t pg, svint16_t fallback, svint16_t data) : "CLASTB Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H"
+  public static unsafe Vector<short> ConditionalExtractLastActiveElement(Vector<short> mask, Vector<short> defaultValue, Vector<short> data);
+
+  /// svint16_t svclastb[_s16](svbool_t pg, svint16_t fallback, svint16_t data) : "CLASTB Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H"
+  /// int16_t svclastb[_n_s16](svbool_t pg, int16_t fallback, svint16_t data) : "CLASTB Wtied, Pg, Wtied, Zdata.H" or "CLASTB Htied, Pg, Htied, Zdata.H"
+  public static unsafe short ConditionalExtractLastActiveElement(Vector<short> mask, short defaultValues, Vector<short> data);
+
+  /// svint32_t svclastb[_s32](svbool_t pg, svint32_t fallback, svint32_t data) : "CLASTB Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S"
+  public static unsafe Vector<int> ConditionalExtractLastActiveElement(Vector<int> mask, Vector<int> defaultValue, Vector<int> data);
+
+  /// svint32_t svclastb[_s32](svbool_t pg, svint32_t fallback, svint32_t data) : "CLASTB Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S"
+  /// int32_t svclastb[_n_s32](svbool_t pg, int32_t fallback, svint32_t data) : "CLASTB Wtied, Pg, Wtied, Zdata.S" or "CLASTB Stied, Pg, Stied, Zdata.S"
+  public static unsafe int ConditionalExtractLastActiveElement(Vector<int> mask, int defaultValues, Vector<int> data);
+
+  /// svint64_t svclastb[_s64](svbool_t pg, svint64_t fallback, svint64_t data) : "CLASTB Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D"
+  public static unsafe Vector<long> ConditionalExtractLastActiveElement(Vector<long> mask, Vector<long> defaultValue, Vector<long> data);
+
+  /// svint64_t svclastb[_s64](svbool_t pg, svint64_t fallback, svint64_t data) : "CLASTB Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D"
+  /// int64_t svclastb[_n_s64](svbool_t pg, int64_t fallback, svint64_t data) : "CLASTB Xtied, Pg, Xtied, Zdata.D" or "CLASTB Dtied, Pg, Dtied, Zdata.D"
+  public static unsafe long ConditionalExtractLastActiveElement(Vector<long> mask, long defaultValues, Vector<long> data);
+
+  /// svuint8_t svclastb[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data) : "CLASTB Ztied.B, Pg, Ztied.B, Zdata.B" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.B, Pg, Zresult.B, Zdata.B"
+  public static unsafe Vector<byte> ConditionalExtractLastActiveElement(Vector<byte> mask, Vector<byte> defaultValue, Vector<byte> data);
+
+  /// svuint8_t svclastb[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data) : "CLASTB Ztied.B, Pg, Ztied.B, Zdata.B" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.B, Pg, Zresult.B, Zdata.B"
+  /// uint8_t svclastb[_n_u8](svbool_t pg, uint8_t fallback, svuint8_t data) : "CLASTB Wtied, Pg, Wtied, Zdata.B" or "CLASTB Btied, Pg, Btied, Zdata.B"
+  public static unsafe byte ConditionalExtractLastActiveElement(Vector<byte> mask, byte defaultValues, Vector<byte> data);
+
+  /// svuint16_t svclastb[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data) : "CLASTB Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H"
+  public static unsafe Vector<ushort> ConditionalExtractLastActiveElement(Vector<ushort> mask, Vector<ushort> defaultValue, Vector<ushort> data);
+
+  /// svuint16_t svclastb[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data) : "CLASTB Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H"
+  /// uint16_t svclastb[_n_u16](svbool_t pg, uint16_t fallback, svuint16_t data) : "CLASTB Wtied, Pg, Wtied, Zdata.H" or "CLASTB Htied, Pg, Htied, Zdata.H"
+  public static unsafe ushort ConditionalExtractLastActiveElement(Vector<ushort> mask, ushort defaultValues, Vector<ushort> data);
+
+  /// svuint32_t svclastb[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data) : "CLASTB Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S"
+  public static unsafe Vector<uint> ConditionalExtractLastActiveElement(Vector<uint> mask, Vector<uint> defaultValue, Vector<uint> data);
+
+  /// svuint32_t svclastb[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data) : "CLASTB Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S"
+  /// uint32_t svclastb[_n_u32](svbool_t pg, uint32_t fallback, svuint32_t data) : "CLASTB Wtied, Pg, Wtied, Zdata.S" or "CLASTB Stied, Pg, Stied, Zdata.S"
+  public static unsafe uint ConditionalExtractLastActiveElement(Vector<uint> mask, uint defaultValues, Vector<uint> data);
+
+  /// svuint64_t svclastb[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data) : "CLASTB Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D"
+  public static unsafe Vector<ulong> ConditionalExtractLastActiveElement(Vector<ulong> mask, Vector<ulong> defaultValue, Vector<ulong> data);
+
+  /// svuint64_t svclastb[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data) : "CLASTB Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D"
+  /// uint64_t svclastb[_n_u64](svbool_t pg, uint64_t fallback, svuint64_t data) : "CLASTB Xtied, Pg, Xtied, Zdata.D" or "CLASTB Dtied, Pg, Dtied, Zdata.D"
+  public static unsafe ulong ConditionalExtractLastActiveElement(Vector<ulong> mask, ulong defaultValues, Vector<ulong> data);
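+
+  // [Editor's sketch] CLASTB extracts the *last* active element, e.g. the final value
+  // produced by the predicated tail of a loop. Hypothetical usage, assuming the
+  // proposed API above:
+  //
+  //   double lastValue = Sve.ConditionalExtractLastActiveElement(mask, 0.0, data);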
+
+
+  /// ConditionalExtractLastActiveElementAndReplicate : Conditionally extract last element
+
+  /// svfloat32_t svclastb[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data) : "CLASTB Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S"
+  public static unsafe Vector<float> ConditionalExtractLastActiveElementAndReplicate(Vector<float> mask, Vector<float> fallback, Vector<float> data);
+
+  /// svfloat64_t svclastb[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data) : "CLASTB Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D"
+  public static unsafe Vector<double> ConditionalExtractLastActiveElementAndReplicate(Vector<double> mask, Vector<double> fallback, Vector<double> data);
+
+  /// svint8_t svclastb[_s8](svbool_t pg, svint8_t fallback, svint8_t data) : "CLASTB Ztied.B, Pg, Ztied.B, Zdata.B" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.B, Pg, Zresult.B, Zdata.B"
+  public static unsafe Vector<sbyte> ConditionalExtractLastActiveElementAndReplicate(Vector<sbyte> mask, Vector<sbyte> fallback, Vector<sbyte> data);
+
+  /// svint16_t svclastb[_s16](svbool_t pg, svint16_t fallback, svint16_t data) : "CLASTB Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H"
+  public static unsafe Vector<short> ConditionalExtractLastActiveElementAndReplicate(Vector<short> mask, Vector<short> fallback, Vector<short> data);
+
+  /// svint32_t svclastb[_s32](svbool_t pg, svint32_t fallback, svint32_t data) : "CLASTB Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S"
+  public static unsafe Vector<int> ConditionalExtractLastActiveElementAndReplicate(Vector<int> mask, Vector<int> fallback, Vector<int> data);
+
+  /// svint64_t svclastb[_s64](svbool_t pg, svint64_t fallback, svint64_t data) : "CLASTB Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D"
+  public static unsafe Vector<long> ConditionalExtractLastActiveElementAndReplicate(Vector<long> mask, Vector<long> fallback, Vector<long> data);
+
+  /// svuint8_t svclastb[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data) : "CLASTB Ztied.B, Pg, Ztied.B, Zdata.B" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.B, Pg, Zresult.B, Zdata.B"
+  public static unsafe Vector<byte> ConditionalExtractLastActiveElementAndReplicate(Vector<byte> mask, Vector<byte> fallback, Vector<byte> data);
+
+  /// svuint16_t svclastb[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data) : "CLASTB Ztied.H, Pg, Ztied.H, Zdata.H" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H"
+  public static unsafe Vector<ushort> ConditionalExtractLastActiveElementAndReplicate(Vector<ushort> mask, Vector<ushort> fallback, Vector<ushort> data);
+
+  /// svuint32_t svclastb[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data) : "CLASTB Ztied.S, Pg, Ztied.S, Zdata.S" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S"
+  public static unsafe Vector<uint> ConditionalExtractLastActiveElementAndReplicate(Vector<uint> mask, Vector<uint> fallback, Vector<uint> data);
+
+  /// svuint64_t svclastb[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data) : "CLASTB Ztied.D, Pg, Ztied.D, Zdata.D" or "MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D"
+  public static unsafe Vector<ulong> ConditionalExtractLastActiveElementAndReplicate(Vector<ulong> mask, Vector<ulong> fallback, Vector<ulong> data);
+
+
+  /// ConditionalSelect : Conditionally select elements
+
+  /// svfloat32_t svsel[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "SEL Zresult.S, Pg, Zop1.S, Zop2.S"
+  public static unsafe Vector<float> ConditionalSelect(Vector<float> mask, Vector<float> left, Vector<float> right);
+
+  /// svfloat64_t svsel[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "SEL Zresult.D, Pg, Zop1.D, Zop2.D"
+  public static unsafe Vector<double> ConditionalSelect(Vector<double> mask, Vector<double> left, Vector<double> right);
+
+  /// svint8_t svsel[_s8](svbool_t pg, svint8_t op1, svint8_t op2) : "SEL Zresult.B, Pg, Zop1.B, Zop2.B"
+  /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2) : "SEL Presult.B, Pg, Pop1.B, Pop2.B"
+  public static unsafe Vector<sbyte> ConditionalSelect(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right);
+
+  /// svint16_t svsel[_s16](svbool_t pg, svint16_t op1, svint16_t op2) : "SEL Zresult.H, Pg, Zop1.H, Zop2.H"
+  /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2) : "SEL Presult.B, Pg, Pop1.B, Pop2.B"
+  public static unsafe Vector<short> ConditionalSelect(Vector<short> mask, Vector<short> left, Vector<short> right);
+
+  /// svint32_t svsel[_s32](svbool_t pg, svint32_t op1, svint32_t op2) : "SEL Zresult.S, Pg, Zop1.S, Zop2.S"
+  /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2) : "SEL Presult.B, Pg, Pop1.B, Pop2.B"
+  public static unsafe Vector<int> ConditionalSelect(Vector<int> mask, Vector<int> left, Vector<int> right);
+
+  /// svint64_t svsel[_s64](svbool_t pg, svint64_t op1, svint64_t op2) : "SEL Zresult.D, Pg, Zop1.D, Zop2.D"
+  /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2) : "SEL Presult.B, Pg, Pop1.B, Pop2.B"
+  public static unsafe Vector<long> ConditionalSelect(Vector<long> mask, Vector<long> left, Vector<long> right);
+
+  /// svuint8_t svsel[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) : "SEL Zresult.B, Pg, Zop1.B, Zop2.B"
+  /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2) : "SEL Presult.B, Pg, Pop1.B, Pop2.B"
+  public static unsafe Vector<byte> ConditionalSelect(Vector<byte> mask, Vector<byte> left, Vector<byte> right);
+
+  /// svuint16_t svsel[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) : "SEL Zresult.H, Pg, Zop1.H, Zop2.H"
+  /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2) : "SEL Presult.B, Pg, Pop1.B, Pop2.B"
+  public static unsafe Vector<ushort> ConditionalSelect(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right);
+
+  /// svuint32_t svsel[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) : "SEL Zresult.S, Pg, Zop1.S, Zop2.S"
+  /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2) : "SEL Presult.B, Pg, Pop1.B, Pop2.B"
+  public static unsafe Vector<uint> ConditionalSelect(Vector<uint> mask, Vector<uint> left, Vector<uint> right);
+
+  /// svuint64_t svsel[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) : "SEL Zresult.D, Pg, Zop1.D, Zop2.D"
+  /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2) : "SEL Presult.B, Pg, Pop1.B, Pop2.B"
+  public static unsafe Vector<ulong> ConditionalSelect(Vector<ulong> mask, Vector<ulong> left, Vector<ulong> right);
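+
+  // [Editor's sketch] Because masks are ordinary Vector<T> values in this proposal,
+  // ConditionalSelect can also merge two masks lane-by-lane (the svsel[_b] predicate
+  // form noted above). Hypothetical usage:
+  //
+  //   Vector<int> merged = Sve.ConditionalSelect(pg, maskA, maskB); // per-lane mask merge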
/// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op) : "BRKA Ptied.B, Pg/M, Pop.B"
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op) : "BRKA Presult.B, Pg/Z, Pop.B"
+ public static unsafe Vector CreateBreakAfterMask(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op) : "BRKA Ptied.B, Pg/M, Pop.B"
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op) : "BRKA Presult.B, Pg/Z, Pop.B"
+ public static unsafe Vector CreateBreakAfterMask(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op) : "BRKA Ptied.B, Pg/M, Pop.B"
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op) : "BRKA Presult.B, Pg/Z, Pop.B"
+ public static unsafe Vector CreateBreakAfterMask(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op) : "BRKA Ptied.B, Pg/M, Pop.B"
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op) : "BRKA Presult.B, Pg/Z, Pop.B"
+ public static unsafe Vector CreateBreakAfterMask(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op) : "BRKA Ptied.B, Pg/M, Pop.B"
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op) : "BRKA Presult.B, Pg/Z, Pop.B"
+ public static unsafe Vector CreateBreakAfterMask(Vector totalMask, Vector fromMask);
+
+
+ /// CreateBreakAfterPropagateMask : Break after first true condition, propagating from previous partition
+
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector CreateBreakAfterPropagateMask(Vector mask, Vector left, Vector right);
+
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector CreateBreakAfterPropagateMask(Vector mask, Vector left, Vector right);
+
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector CreateBreakAfterPropagateMask(Vector mask, Vector left, Vector right);
+
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector CreateBreakAfterPropagateMask(Vector mask, Vector left, Vector right);
+
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector CreateBreakAfterPropagateMask(Vector mask, Vector left, Vector right);
+
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector CreateBreakAfterPropagateMask(Vector mask, Vector left, Vector right);
+
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector CreateBreakAfterPropagateMask(Vector mask, Vector left, Vector right);
+
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector CreateBreakAfterPropagateMask(Vector mask, Vector left, Vector right);
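+
+
+ /// Example (illustrative sketch, not part of the generated surface): the break family
+ /// partitions a vector loop at the first true condition. CreateBreakAfterMask (BRKA)
+ /// keeps the lanes up to and including the first match, and the propagate forms (BRKPA,
+ /// and BRKPB below) carry that break state into the next loop partition. Together with
+ /// ConditionalSelect this gives a "process until sentinel" idiom; the Vector<T> operand
+ /// shape and the CompareEqual/CreateTrueMaskByte names are taken from this proposal:
+ ///
+ ///     Vector<byte> all     = Sve.CreateTrueMaskByte();
+ ///     Vector<byte> isStop  = Sve.CompareEqual(data, sentinel);
+ ///     Vector<byte> prefix  = Sve.CreateBreakAfterMask(all, isStop);          // lanes <= first match
+ ///     Vector<byte> blended = Sve.ConditionalSelect(prefix, processed, data); // SEL
+
+
+ /// CreateBreakBeforeMask : Break before first true condition
+
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op) : "BRKB Ptied.B, Pg/M, Pop.B"
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op) : "BRKB Presult.B, Pg/Z, Pop.B"
+ public static unsafe Vector CreateBreakBeforeMask(Vector totalMask, Vector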
fromMask); + + /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op) : "BRKB Ptied.B, Pg/M, Pop.B" + /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op) : "BRKB Presult.B, Pg/Z, Pop.B" + public static unsafe Vector CreateBreakBeforeMask(Vector totalMask, Vector fromMask); + + /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op) : "BRKB Ptied.B, Pg/M, Pop.B" + /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op) : "BRKB Presult.B, Pg/Z, Pop.B" + public static unsafe Vector CreateBreakBeforeMask(Vector totalMask, Vector fromMask); + + /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op) : "BRKB Ptied.B, Pg/M, Pop.B" + /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op) : "BRKB Presult.B, Pg/Z, Pop.B" + public static unsafe Vector CreateBreakBeforeMask(Vector totalMask, Vector fromMask); + + /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op) : "BRKB Ptied.B, Pg/M, Pop.B" + /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op) : "BRKB Presult.B, Pg/Z, Pop.B" + public static unsafe Vector CreateBreakBeforeMask(Vector totalMask, Vector fromMask); + + /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op) : "BRKB Ptied.B, Pg/M, Pop.B" + /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op) : "BRKB Presult.B, Pg/Z, Pop.B" + public static unsafe Vector CreateBreakBeforeMask(Vector totalMask, Vector fromMask); + + /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op) : "BRKB Ptied.B, Pg/M, Pop.B" + /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op) : "BRKB Presult.B, Pg/Z, Pop.B" + public static unsafe Vector CreateBreakBeforeMask(Vector totalMask, Vector fromMask); + + /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op) : "BRKB Ptied.B, Pg/M, Pop.B" + /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op) : "BRKB Presult.B, Pg/Z, Pop.B" + public static unsafe Vector CreateBreakBeforeMask(Vector totalMask, Vector fromMask); + + + /// CreateBreakBeforePropagateMask : Break before first true condition, propagating from previous partition + + /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector CreateBreakBeforePropagateMask(Vector mask, Vector left, Vector right); + + /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector CreateBreakBeforePropagateMask(Vector mask, Vector left, Vector right); + + /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector CreateBreakBeforePropagateMask(Vector mask, Vector left, Vector right); + + /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector CreateBreakBeforePropagateMask(Vector mask, Vector left, Vector right); + + /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector CreateBreakBeforePropagateMask(Vector mask, Vector left, Vector right); + + /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector CreateBreakBeforePropagateMask(Vector mask, Vector left, Vector right); + + /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B" + public static unsafe Vector 
CreateBreakBeforePropagateMask(Vector mask, Vector left, Vector right);
+
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B"
+ public static unsafe Vector CreateBreakBeforePropagateMask(Vector mask, Vector left, Vector right);
+
+
+ /// CreateBreakPropagateMask : Propagate break to next partition
+
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B"
+ public static unsafe Vector CreateBreakPropagateMask(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B"
+ public static unsafe Vector CreateBreakPropagateMask(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B"
+ public static unsafe Vector CreateBreakPropagateMask(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B"
+ public static unsafe Vector CreateBreakPropagateMask(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B"
+ public static unsafe Vector CreateBreakPropagateMask(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B"
+ public static unsafe Vector CreateBreakPropagateMask(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B"
+ public static unsafe Vector CreateBreakPropagateMask(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) : "BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B"
+ public static unsafe Vector CreateBreakPropagateMask(Vector totalMask, Vector fromMask);
+
+
+ /// CreateFalseMaskByte : Set all predicate elements to false
+
+ /// svbool_t svpfalse[_b]() : "PFALSE Presult.B"
+ public static unsafe Vector CreateFalseMaskByte();
+
+
+ /// CreateFalseMaskDouble : Set all predicate elements to false
+
+ /// svbool_t svpfalse[_b]() : "PFALSE Presult.B"
+ public static unsafe Vector CreateFalseMaskDouble();
+
+
+ /// CreateFalseMaskInt16 : Set all predicate elements to false
+
+ /// svbool_t svpfalse[_b]() : "PFALSE Presult.B"
+ public static unsafe Vector CreateFalseMaskInt16();
+
+
+ /// CreateFalseMaskInt32 : Set all predicate elements to false
+
+ /// svbool_t svpfalse[_b]() : "PFALSE Presult.B"
+ public static unsafe Vector CreateFalseMaskInt32();
+
+
+ /// CreateFalseMaskInt64 : Set all predicate elements to false
+
+ /// svbool_t svpfalse[_b]() : "PFALSE Presult.B"
+ public static unsafe Vector CreateFalseMaskInt64();
+
+
+ /// CreateFalseMaskSByte : Set all predicate elements to false
+
+ /// svbool_t svpfalse[_b]() : "PFALSE Presult.B"
+ public static unsafe Vector CreateFalseMaskSByte();
+
+
+ /// CreateFalseMaskSingle : Set all predicate elements to false
+
+ /// svbool_t svpfalse[_b]() : "PFALSE Presult.B"
+ public static unsafe Vector CreateFalseMaskSingle();
+
+
+ /// CreateFalseMaskUInt16 : Set all predicate elements to false
+
+ /// svbool_t svpfalse[_b]() : "PFALSE Presult.B"
+ public static unsafe Vector CreateFalseMaskUInt16();
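+
+
+ /// Example (illustrative sketch): CreateFalseMask* materialises an all-false predicate
+ /// (PFALSE), the natural seed for a mask accumulator that is OR-ed with per-block
+ /// comparison results; Or is assumed here from the bitwise category of this proposal:
+ ///
+ ///     Vector<int> seen = Sve.CreateFalseMaskInt32();         // PFALSE: no lanes set
+ ///     seen = Sve.Or(seen, Sve.CompareEqual(block, needle));  // accumulate matching lanes
+
+
+ /// CreateFalseMaskUInt32 : Set all predicate elements to false
+
+ /// svbool_t svpfalse[_b]() : "PFALSE Presult.B"
+ public static unsafe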
Vector CreateFalseMaskUInt32();
+
+
+ /// CreateFalseMaskUInt64 : Set all predicate elements to false
+
+ /// svbool_t svpfalse[_b]() : "PFALSE Presult.B"
+ public static unsafe Vector CreateFalseMaskUInt64();
+
+
+ /// CreateMaskForFirstActiveElement : Set the first active predicate element to true
+
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op) : "PFIRST Ptied.B, Pg, Ptied.B"
+ public static unsafe Vector CreateMaskForFirstActiveElement(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op) : "PFIRST Ptied.B, Pg, Ptied.B"
+ public static unsafe Vector CreateMaskForFirstActiveElement(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op) : "PFIRST Ptied.B, Pg, Ptied.B"
+ public static unsafe Vector CreateMaskForFirstActiveElement(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op) : "PFIRST Ptied.B, Pg, Ptied.B"
+ public static unsafe Vector CreateMaskForFirstActiveElement(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op) : "PFIRST Ptied.B, Pg, Ptied.B"
+ public static unsafe Vector CreateMaskForFirstActiveElement(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op) : "PFIRST Ptied.B, Pg, Ptied.B"
+ public static unsafe Vector CreateMaskForFirstActiveElement(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op) : "PFIRST Ptied.B, Pg, Ptied.B"
+ public static unsafe Vector CreateMaskForFirstActiveElement(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op) : "PFIRST Ptied.B, Pg, Ptied.B"
+ public static unsafe Vector CreateMaskForFirstActiveElement(Vector totalMask, Vector fromMask);
+
+
+ /// CreateMaskForNextActiveElement : Find next active predicate
+
+ /// svbool_t svpnext_b8(svbool_t pg, svbool_t op) : "PNEXT Ptied.B, Pg, Ptied.B"
+ public static unsafe Vector CreateMaskForNextActiveElement(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svpnext_b16(svbool_t pg, svbool_t op) : "PNEXT Ptied.H, Pg, Ptied.H"
+ public static unsafe Vector CreateMaskForNextActiveElement(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svpnext_b32(svbool_t pg, svbool_t op) : "PNEXT Ptied.S, Pg, Ptied.S"
+ public static unsafe Vector CreateMaskForNextActiveElement(Vector totalMask, Vector fromMask);
+
+ /// svbool_t svpnext_b64(svbool_t pg, svbool_t op) : "PNEXT Ptied.D, Pg, Ptied.D"
+ public static unsafe Vector CreateMaskForNextActiveElement(Vector totalMask, Vector fromMask);
+
+
+ /// CreateTrueMaskByte : Set predicate elements to true
+
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern) : "PTRUE Presult.B, pattern"
+ public static unsafe Vector CreateTrueMaskByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All);
+
+
+ /// CreateTrueMaskDouble : Set predicate elements to true
+
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern) : "PTRUE Presult.B, pattern"
+ public static unsafe Vector CreateTrueMaskDouble([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All);
+
+
+ /// CreateTrueMaskInt16 : Set predicate elements to true
+
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern) : "PTRUE Presult.B, pattern"
+ public static unsafe Vector CreateTrueMaskInt16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All);
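+
+
+ /// Example (illustrative sketch): CreateTrueMask* takes an SveMaskPattern constant that
+ /// selects how many leading lanes are set (SveMaskPattern.All by default), and
+ /// CreateMaskForNextActiveElement (PNEXT, above) walks the active lanes of such a mask;
+ /// starting from an all-false mask yields the first active lane:
+ ///
+ ///     Vector<uint> active = Sve.CreateTrueMaskUInt32(SveMaskPattern.All);   // PTRUE
+ ///     Vector<uint> lane   = Sve.CreateMaskForNextActiveElement(active, Sve.CreateFalseMaskUInt32());
+ ///     // passing `lane` back as the second operand advances to the next active lane
+
+
+ /// CreateTrueMaskInt32 : Set predicate elements to true
+
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern) : "PTRUE Presult.B, pattern"
+ public static unsafe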
Vector CreateTrueMaskInt32([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All);
+
+
+ /// CreateTrueMaskInt64 : Set predicate elements to true
+
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern) : "PTRUE Presult.B, pattern"
+ public static unsafe Vector CreateTrueMaskInt64([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All);
+
+
+ /// CreateTrueMaskSByte : Set predicate elements to true
+
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern) : "PTRUE Presult.B, pattern"
+ public static unsafe Vector CreateTrueMaskSByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All);
+
+
+ /// CreateTrueMaskSingle : Set predicate elements to true
+
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern) : "PTRUE Presult.B, pattern"
+ public static unsafe Vector CreateTrueMaskSingle([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All);
+
+
+ /// CreateTrueMaskUInt16 : Set predicate elements to true
+
+ /// svbool_t svptrue_pat_b16(enum svpattern pattern) : "PTRUE Presult.H, pattern"
+ public static unsafe Vector CreateTrueMaskUInt16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All);
+
+
+ /// CreateTrueMaskUInt32 : Set predicate elements to true
+
+ /// svbool_t svptrue_pat_b32(enum svpattern pattern) : "PTRUE Presult.S, pattern"
+ public static unsafe Vector CreateTrueMaskUInt32([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All);
+
+
+ /// CreateTrueMaskUInt64 : Set predicate elements to true
+
+ /// svbool_t svptrue_pat_b64(enum svpattern pattern) : "PTRUE Presult.D, pattern"
+ public static unsafe Vector CreateTrueMaskUInt64([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All);
+
+
+ /// CreateWhileLessThanMask16Bit : While incrementing scalar is less than
+
+ /// svbool_t svwhilelt_b16[_s32](int32_t op1, int32_t op2) : "WHILELT Presult.H, Wop1, Wop2"
+ public static unsafe Vector CreateWhileLessThanMask16Bit(int left, int right);
+
+ /// svbool_t svwhilelt_b16[_s64](int64_t op1, int64_t op2) : "WHILELT Presult.H, Xop1, Xop2"
+ public static unsafe Vector CreateWhileLessThanMask16Bit(long left, long right);
+
+ /// svbool_t svwhilelt_b16[_u32](uint32_t op1, uint32_t op2) : "WHILELO Presult.H, Wop1, Wop2"
+ public static unsafe Vector CreateWhileLessThanMask16Bit(uint left, uint right);
+
+ /// svbool_t svwhilelt_b16[_u64](uint64_t op1, uint64_t op2) : "WHILELO Presult.H, Xop1, Xop2"
+ public static unsafe Vector CreateWhileLessThanMask16Bit(ulong left, ulong right);
+
+
+ /// CreateWhileLessThanMask32Bit : While incrementing scalar is less than
+
+ /// svbool_t svwhilelt_b32[_s32](int32_t op1, int32_t op2) : "WHILELT Presult.S, Wop1, Wop2"
+ public static unsafe Vector CreateWhileLessThanMask32Bit(int left, int right);
+
+ /// svbool_t svwhilelt_b32[_s64](int64_t op1, int64_t op2) : "WHILELT Presult.S, Xop1, Xop2"
+ public static unsafe Vector CreateWhileLessThanMask32Bit(long left, long right);
+
+ /// svbool_t svwhilelt_b32[_u32](uint32_t op1, uint32_t op2) : "WHILELO Presult.S, Wop1, Wop2"
+ public static unsafe Vector CreateWhileLessThanMask32Bit(uint left, uint right);
+
+ /// svbool_t svwhilelt_b32[_u64](uint64_t op1, uint64_t op2) : "WHILELO Presult.S, Xop1, Xop2"
+ public static unsafe Vector CreateWhileLessThanMask32Bit(ulong left, ulong right);
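+
+
+ /// Example (illustrative sketch): the while family builds the classic strip-mining
+ /// bound mask: lane k of the result is true while (left + k) < right, so the final
+ /// partial vector needs no scalar tail loop. Count32BitElements (CNTW) is assumed
+ /// here from the counting category of this proposal:
+ ///
+ ///     for (int i = 0; i < n; i += (int)Sve.Count32BitElements())
+ ///     {
+ ///         Vector<int> bound = Sve.CreateWhileLessThanMask32Bit(i, n);   // WHILELT
+ ///         // ... loads, arithmetic and stores governed by `bound` ...
+ ///     }
+
+
+ /// CreateWhileLessThanMask64Bit : While incrementing scalar is less than
+
+ /// svbool_t svwhilelt_b64[_s32](int32_t op1, int32_t op2) : "WHILELT Presult.D, Wop1, Wop2"
+ public static unsafe Vector CreateWhileLessThanMask64Bit(int left, int right);
+
+ ///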
svbool_t svwhilelt_b64[_s64](int64_t op1, int64_t op2) : "WHILELT Presult.D, Xop1, Xop2" + public static unsafe Vector CreateWhileLessThanMask64Bit(long left, long right); + + /// svbool_t svwhilelt_b64[_u32](uint32_t op1, uint32_t op2) : "WHILELO Presult.D, Wop1, Wop2" + public static unsafe Vector CreateWhileLessThanMask64Bit(uint left, uint right); + + /// svbool_t svwhilelt_b64[_u64](uint64_t op1, uint64_t op2) : "WHILELO Presult.D, Xop1, Xop2" + public static unsafe Vector CreateWhileLessThanMask64Bit(ulong left, ulong right); + + + /// CreateWhileLessThanMask8Bit : While incrementing scalar is less than + + /// svbool_t svwhilelt_b8[_s32](int32_t op1, int32_t op2) : "WHILELT Presult.B, Wop1, Wop2" + public static unsafe Vector CreateWhileLessThanMask8Bit(int left, int right); + + /// svbool_t svwhilelt_b8[_s64](int64_t op1, int64_t op2) : "WHILELT Presult.B, Xop1, Xop2" + public static unsafe Vector CreateWhileLessThanMask8Bit(long left, long right); + + /// svbool_t svwhilelt_b8[_u32](uint32_t op1, uint32_t op2) : "WHILELO Presult.B, Wop1, Wop2" + public static unsafe Vector CreateWhileLessThanMask8Bit(uint left, uint right); + + /// svbool_t svwhilelt_b8[_u64](uint64_t op1, uint64_t op2) : "WHILELO Presult.B, Xop1, Xop2" + public static unsafe Vector CreateWhileLessThanMask8Bit(ulong left, ulong right); + + + /// CreateWhileLessThanOrEqualMask16Bit : While incrementing scalar is less than or equal to + + /// svbool_t svwhilele_b16[_s32](int32_t op1, int32_t op2) : "WHILELE Presult.H, Wop1, Wop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask16Bit(int left, int right); + + /// svbool_t svwhilele_b16[_s64](int64_t op1, int64_t op2) : "WHILELE Presult.H, Xop1, Xop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask16Bit(long left, long right); + + /// svbool_t svwhilele_b16[_u32](uint32_t op1, uint32_t op2) : "WHILELS Presult.H, Wop1, Wop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask16Bit(uint left, uint right); + + /// svbool_t svwhilele_b16[_u64](uint64_t op1, uint64_t op2) : "WHILELS Presult.H, Xop1, Xop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask16Bit(ulong left, ulong right); + + + /// CreateWhileLessThanOrEqualMask32Bit : While incrementing scalar is less than or equal to + + /// svbool_t svwhilele_b32[_s32](int32_t op1, int32_t op2) : "WHILELE Presult.S, Wop1, Wop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask32Bit(int left, int right); + + /// svbool_t svwhilele_b32[_s64](int64_t op1, int64_t op2) : "WHILELE Presult.S, Xop1, Xop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask32Bit(long left, long right); + + /// svbool_t svwhilele_b32[_u32](uint32_t op1, uint32_t op2) : "WHILELS Presult.S, Wop1, Wop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask32Bit(uint left, uint right); + + /// svbool_t svwhilele_b32[_u64](uint64_t op1, uint64_t op2) : "WHILELS Presult.S, Xop1, Xop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask32Bit(ulong left, ulong right); + + + /// CreateWhileLessThanOrEqualMask64Bit : While incrementing scalar is less than or equal to + + /// svbool_t svwhilele_b64[_s32](int32_t op1, int32_t op2) : "WHILELE Presult.D, Wop1, Wop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask64Bit(int left, int right); + + /// svbool_t svwhilele_b64[_s64](int64_t op1, int64_t op2) : "WHILELE Presult.D, Xop1, Xop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask64Bit(long left, long right); + + /// svbool_t svwhilele_b64[_u32](uint32_t 
op1, uint32_t op2) : "WHILELS Presult.D, Wop1, Wop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask64Bit(uint left, uint right); + + /// svbool_t svwhilele_b64[_u64](uint64_t op1, uint64_t op2) : "WHILELS Presult.D, Xop1, Xop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask64Bit(ulong left, ulong right); + + + /// CreateWhileLessThanOrEqualMask8Bit : While incrementing scalar is less than or equal to + + /// svbool_t svwhilele_b8[_s32](int32_t op1, int32_t op2) : "WHILELE Presult.B, Wop1, Wop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask8Bit(int left, int right); + + /// svbool_t svwhilele_b8[_s64](int64_t op1, int64_t op2) : "WHILELE Presult.B, Xop1, Xop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask8Bit(long left, long right); + + /// svbool_t svwhilele_b8[_u32](uint32_t op1, uint32_t op2) : "WHILELS Presult.B, Wop1, Wop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask8Bit(uint left, uint right); + + /// svbool_t svwhilele_b8[_u64](uint64_t op1, uint64_t op2) : "WHILELS Presult.B, Xop1, Xop2" + public static unsafe Vector CreateWhileLessThanOrEqualMask8Bit(ulong left, ulong right); + + + /// ExtractAfterLastScalar : Extract element after last + + /// float32_t svlasta[_f32](svbool_t pg, svfloat32_t op) : "LASTA Wresult, Pg, Zop.S" or "LASTA Sresult, Pg, Zop.S" + public static unsafe float ExtractAfterLastScalar(Vector value); + + /// float64_t svlasta[_f64](svbool_t pg, svfloat64_t op) : "LASTA Xresult, Pg, Zop.D" or "LASTA Dresult, Pg, Zop.D" + public static unsafe double ExtractAfterLastScalar(Vector value); + + /// int8_t svlasta[_s8](svbool_t pg, svint8_t op) : "LASTA Wresult, Pg, Zop.B" or "LASTA Bresult, Pg, Zop.B" + public static unsafe sbyte ExtractAfterLastScalar(Vector value); + + /// int16_t svlasta[_s16](svbool_t pg, svint16_t op) : "LASTA Wresult, Pg, Zop.H" or "LASTA Hresult, Pg, Zop.H" + public static unsafe short ExtractAfterLastScalar(Vector value); + + /// int32_t svlasta[_s32](svbool_t pg, svint32_t op) : "LASTA Wresult, Pg, Zop.S" or "LASTA Sresult, Pg, Zop.S" + public static unsafe int ExtractAfterLastScalar(Vector value); + + /// int64_t svlasta[_s64](svbool_t pg, svint64_t op) : "LASTA Xresult, Pg, Zop.D" or "LASTA Dresult, Pg, Zop.D" + public static unsafe long ExtractAfterLastScalar(Vector value); + + /// uint8_t svlasta[_u8](svbool_t pg, svuint8_t op) : "LASTA Wresult, Pg, Zop.B" or "LASTA Bresult, Pg, Zop.B" + public static unsafe byte ExtractAfterLastScalar(Vector value); + + /// uint16_t svlasta[_u16](svbool_t pg, svuint16_t op) : "LASTA Wresult, Pg, Zop.H" or "LASTA Hresult, Pg, Zop.H" + public static unsafe ushort ExtractAfterLastScalar(Vector value); + + /// uint32_t svlasta[_u32](svbool_t pg, svuint32_t op) : "LASTA Wresult, Pg, Zop.S" or "LASTA Sresult, Pg, Zop.S" + public static unsafe uint ExtractAfterLastScalar(Vector value); + + /// uint64_t svlasta[_u64](svbool_t pg, svuint64_t op) : "LASTA Xresult, Pg, Zop.D" or "LASTA Dresult, Pg, Zop.D" + public static unsafe ulong ExtractAfterLastScalar(Vector value); + + + /// ExtractAfterLastVector : Extract element after last + + /// float32_t svlasta[_f32](svbool_t pg, svfloat32_t op) : "LASTA Wresult, Pg, Zop.S" or "LASTA Sresult, Pg, Zop.S" + public static unsafe Vector ExtractAfterLastVector(Vector value); + + /// float64_t svlasta[_f64](svbool_t pg, svfloat64_t op) : "LASTA Xresult, Pg, Zop.D" or "LASTA Dresult, Pg, Zop.D" + public static unsafe Vector ExtractAfterLastVector(Vector value); + + /// int8_t svlasta[_s8](svbool_t pg, 
svint8_t op) : "LASTA Wresult, Pg, Zop.B" or "LASTA Bresult, Pg, Zop.B" + public static unsafe Vector ExtractAfterLastVector(Vector value); + + /// int16_t svlasta[_s16](svbool_t pg, svint16_t op) : "LASTA Wresult, Pg, Zop.H" or "LASTA Hresult, Pg, Zop.H" + public static unsafe Vector ExtractAfterLastVector(Vector value); + + /// int32_t svlasta[_s32](svbool_t pg, svint32_t op) : "LASTA Wresult, Pg, Zop.S" or "LASTA Sresult, Pg, Zop.S" + public static unsafe Vector ExtractAfterLastVector(Vector value); + + /// int64_t svlasta[_s64](svbool_t pg, svint64_t op) : "LASTA Xresult, Pg, Zop.D" or "LASTA Dresult, Pg, Zop.D" + public static unsafe Vector ExtractAfterLastVector(Vector value); + + /// uint8_t svlasta[_u8](svbool_t pg, svuint8_t op) : "LASTA Wresult, Pg, Zop.B" or "LASTA Bresult, Pg, Zop.B" + public static unsafe Vector ExtractAfterLastVector(Vector value); + + /// uint16_t svlasta[_u16](svbool_t pg, svuint16_t op) : "LASTA Wresult, Pg, Zop.H" or "LASTA Hresult, Pg, Zop.H" + public static unsafe Vector ExtractAfterLastVector(Vector value); + + /// uint32_t svlasta[_u32](svbool_t pg, svuint32_t op) : "LASTA Wresult, Pg, Zop.S" or "LASTA Sresult, Pg, Zop.S" + public static unsafe Vector ExtractAfterLastVector(Vector value); + + /// uint64_t svlasta[_u64](svbool_t pg, svuint64_t op) : "LASTA Xresult, Pg, Zop.D" or "LASTA Dresult, Pg, Zop.D" + public static unsafe Vector ExtractAfterLastVector(Vector value); + + + /// ExtractLastScalar : Extract last element + + /// float32_t svlastb[_f32](svbool_t pg, svfloat32_t op) : "LASTB Wresult, Pg, Zop.S" or "LASTB Sresult, Pg, Zop.S" + public static unsafe float ExtractLastScalar(Vector value); + + /// float64_t svlastb[_f64](svbool_t pg, svfloat64_t op) : "LASTB Xresult, Pg, Zop.D" or "LASTB Dresult, Pg, Zop.D" + public static unsafe double ExtractLastScalar(Vector value); + + /// int8_t svlastb[_s8](svbool_t pg, svint8_t op) : "LASTB Wresult, Pg, Zop.B" or "LASTB Bresult, Pg, Zop.B" + public static unsafe sbyte ExtractLastScalar(Vector value); + + /// int16_t svlastb[_s16](svbool_t pg, svint16_t op) : "LASTB Wresult, Pg, Zop.H" or "LASTB Hresult, Pg, Zop.H" + public static unsafe short ExtractLastScalar(Vector value); + + /// int32_t svlastb[_s32](svbool_t pg, svint32_t op) : "LASTB Wresult, Pg, Zop.S" or "LASTB Sresult, Pg, Zop.S" + public static unsafe int ExtractLastScalar(Vector value); + + /// int64_t svlastb[_s64](svbool_t pg, svint64_t op) : "LASTB Xresult, Pg, Zop.D" or "LASTB Dresult, Pg, Zop.D" + public static unsafe long ExtractLastScalar(Vector value); + + /// uint8_t svlastb[_u8](svbool_t pg, svuint8_t op) : "LASTB Wresult, Pg, Zop.B" or "LASTB Bresult, Pg, Zop.B" + public static unsafe byte ExtractLastScalar(Vector value); + + /// uint16_t svlastb[_u16](svbool_t pg, svuint16_t op) : "LASTB Wresult, Pg, Zop.H" or "LASTB Hresult, Pg, Zop.H" + public static unsafe ushort ExtractLastScalar(Vector value); + + /// uint32_t svlastb[_u32](svbool_t pg, svuint32_t op) : "LASTB Wresult, Pg, Zop.S" or "LASTB Sresult, Pg, Zop.S" + public static unsafe uint ExtractLastScalar(Vector value); + + /// uint64_t svlastb[_u64](svbool_t pg, svuint64_t op) : "LASTB Xresult, Pg, Zop.D" or "LASTB Dresult, Pg, Zop.D" + public static unsafe ulong ExtractLastScalar(Vector value); + + + /// ExtractLastVector : Extract last element + + /// float32_t svlastb[_f32](svbool_t pg, svfloat32_t op) : "LASTB Wresult, Pg, Zop.S" or "LASTB Sresult, Pg, Zop.S" + public static unsafe Vector ExtractLastVector(Vector value); + + /// float64_t svlastb[_f64](svbool_t pg, 
svfloat64_t op) : "LASTB Xresult, Pg, Zop.D" or "LASTB Dresult, Pg, Zop.D" + public static unsafe Vector ExtractLastVector(Vector value); + + /// int8_t svlastb[_s8](svbool_t pg, svint8_t op) : "LASTB Wresult, Pg, Zop.B" or "LASTB Bresult, Pg, Zop.B" + public static unsafe Vector ExtractLastVector(Vector value); + + /// int16_t svlastb[_s16](svbool_t pg, svint16_t op) : "LASTB Wresult, Pg, Zop.H" or "LASTB Hresult, Pg, Zop.H" + public static unsafe Vector ExtractLastVector(Vector value); + + /// int32_t svlastb[_s32](svbool_t pg, svint32_t op) : "LASTB Wresult, Pg, Zop.S" or "LASTB Sresult, Pg, Zop.S" + public static unsafe Vector ExtractLastVector(Vector value); + + /// int64_t svlastb[_s64](svbool_t pg, svint64_t op) : "LASTB Xresult, Pg, Zop.D" or "LASTB Dresult, Pg, Zop.D" + public static unsafe Vector ExtractLastVector(Vector value); + + /// uint8_t svlastb[_u8](svbool_t pg, svuint8_t op) : "LASTB Wresult, Pg, Zop.B" or "LASTB Bresult, Pg, Zop.B" + public static unsafe Vector ExtractLastVector(Vector value); + + /// uint16_t svlastb[_u16](svbool_t pg, svuint16_t op) : "LASTB Wresult, Pg, Zop.H" or "LASTB Hresult, Pg, Zop.H" + public static unsafe Vector ExtractLastVector(Vector value); + + /// uint32_t svlastb[_u32](svbool_t pg, svuint32_t op) : "LASTB Wresult, Pg, Zop.S" or "LASTB Sresult, Pg, Zop.S" + public static unsafe Vector ExtractLastVector(Vector value); + + /// uint64_t svlastb[_u64](svbool_t pg, svuint64_t op) : "LASTB Xresult, Pg, Zop.D" or "LASTB Dresult, Pg, Zop.D" + public static unsafe Vector ExtractLastVector(Vector value); + + + /// ExtractVector : Extract vector from pair of vectors + + /// svfloat32_t svext[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm3) : "EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 4" or "MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 4" + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index); + + /// svfloat64_t svext[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm3) : "EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 8" or "MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 8" + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index); + + /// svint8_t svext[_s8](svint8_t op1, svint8_t op2, uint64_t imm3) : "EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3" or "MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3" + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index); + + /// svint16_t svext[_s16](svint16_t op1, svint16_t op2, uint64_t imm3) : "EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 2" or "MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 2" + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index); + + /// svint32_t svext[_s32](svint32_t op1, svint32_t op2, uint64_t imm3) : "EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 4" or "MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 4" + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index); + + /// svint64_t svext[_s64](svint64_t op1, svint64_t op2, uint64_t imm3) : "EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 8" or "MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 8" + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index); + + /// svuint8_t svext[_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) : "EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3" or "MOVPRFX Zresult, 
Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3"
+ public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index);
+
+ /// svuint16_t svext[_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) : "EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 2" or "MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 2"
+ public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index);
+
+ /// svuint32_t svext[_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) : "EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 4" or "MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 4"
+ public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index);
+
+ /// svuint64_t svext[_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) : "EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 8" or "MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 8"
+ public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index);
+
+
+ /// TestAnyTrue : Test whether any active element is true
+
+ /// bool svptest_any(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask);
+
+ /// bool svptest_any(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask);
+
+ /// bool svptest_any(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask);
+
+ /// bool svptest_any(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask);
+
+ /// bool svptest_any(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask);
+
+ /// bool svptest_any(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask);
+
+ /// bool svptest_any(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask);
+
+ /// bool svptest_any(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask);
+
+
+ /// TestFirstTrue : Test whether the first active element is true
+
+ /// bool svptest_first(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask);
+
+ /// bool svptest_first(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask);
+
+ /// bool svptest_first(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask);
+
+ /// bool svptest_first(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask);
+
+ /// bool svptest_first(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask);
+
+ /// bool svptest_first(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask);
+
+ /// bool svptest_first(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask);
+
+ /// bool svptest_first(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask);
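+
+
+ /// Example (illustrative sketch): the Test* family collapses a predicate pair to a
+ /// scalar flag (PTEST), typically to drive loop control from lane state:
+ ///
+ ///     Vector<byte> hits = Sve.CompareEqual(block, needle);
+ ///     if (Sve.TestAnyTrue(Sve.CreateTrueMaskByte(), hits))
+ ///     {
+ ///         // at least one active lane matched; leave the search loop
+ ///     }
+
+
+ /// TestLastTrue : Test whether the last active element is true
+
+ /// bool svptest_last(svbool_t pg, svbool_t op) : PTEST
+ public static unsafe bool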
TestLastTrue(Vector leftMask, Vector rightMask); + + /// bool svptest_last(svbool_t pg, svbool_t op) : PTEST + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask); + + /// bool svptest_last(svbool_t pg, svbool_t op) : PTEST + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask); + + /// bool svptest_last(svbool_t pg, svbool_t op) : PTEST + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask); + + /// bool svptest_last(svbool_t pg, svbool_t op) : PTEST + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask); + + /// bool svptest_last(svbool_t pg, svbool_t op) : PTEST + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask); + + /// bool svptest_last(svbool_t pg, svbool_t op) : PTEST + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask); + + /// bool svptest_last(svbool_t pg, svbool_t op) : PTEST + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask); + + + /// total method signatures: 354 + /// total method names: 60 +} + + /// Optional Entries: + /// public static unsafe Vector AbsoluteCompareGreaterThan(Vector left, float right); // svacgt[_n_f32] + /// public static unsafe Vector AbsoluteCompareGreaterThan(Vector left, double right); // svacgt[_n_f64] + /// public static unsafe Vector AbsoluteCompareGreaterThanOrEqual(Vector left, float right); // svacge[_n_f32] + /// public static unsafe Vector AbsoluteCompareGreaterThanOrEqual(Vector left, double right); // svacge[_n_f64] + /// public static unsafe Vector AbsoluteCompareLessThan(Vector left, float right); // svaclt[_n_f32] + /// public static unsafe Vector AbsoluteCompareLessThan(Vector left, double right); // svaclt[_n_f64] + /// public static unsafe Vector AbsoluteCompareLessThanOrEqual(Vector left, float right); // svacle[_n_f32] + /// public static unsafe Vector AbsoluteCompareLessThanOrEqual(Vector left, double right); // svacle[_n_f64] + /// public static unsafe Vector CompareEqual(Vector left, float right); // svcmpeq[_n_f32] + /// public static unsafe Vector CompareEqual(Vector left, double right); // svcmpeq[_n_f64] + /// public static unsafe Vector CompareEqual(Vector left, sbyte right); // svcmpeq[_n_s8] + /// public static unsafe Vector CompareEqual(Vector left, short right); // svcmpeq[_n_s16] + /// public static unsafe Vector CompareEqual(Vector left, int right); // svcmpeq[_n_s32] + /// public static unsafe Vector CompareEqual(Vector left, long right); // svcmpeq[_n_s64] + /// public static unsafe Vector CompareEqual(Vector left, byte right); // svcmpeq[_n_u8] + /// public static unsafe Vector CompareEqual(Vector left, ushort right); // svcmpeq[_n_u16] + /// public static unsafe Vector CompareEqual(Vector left, uint right); // svcmpeq[_n_u32] + /// public static unsafe Vector CompareEqual(Vector left, ulong right); // svcmpeq[_n_u64] + /// public static unsafe Vector CompareEqual(Vector left, long right); // svcmpeq_wide[_n_s8] + /// public static unsafe Vector CompareEqual(Vector left, long right); // svcmpeq_wide[_n_s16] + /// public static unsafe Vector CompareEqual(Vector left, long right); // svcmpeq_wide[_n_s32] + /// public static unsafe Vector CompareGreaterThan(Vector left, float right); // svcmpgt[_n_f32] + /// public static unsafe Vector CompareGreaterThan(Vector left, double right); // svcmpgt[_n_f64] + /// public static unsafe Vector CompareGreaterThan(Vector left, sbyte right); // svcmpgt[_n_s8] + /// public static unsafe Vector CompareGreaterThan(Vector left, 
short right); // svcmpgt[_n_s16] + /// public static unsafe Vector CompareGreaterThan(Vector left, int right); // svcmpgt[_n_s32] + /// public static unsafe Vector CompareGreaterThan(Vector left, long right); // svcmpgt[_n_s64] + /// public static unsafe Vector CompareGreaterThan(Vector left, byte right); // svcmpgt[_n_u8] + /// public static unsafe Vector CompareGreaterThan(Vector left, ushort right); // svcmpgt[_n_u16] + /// public static unsafe Vector CompareGreaterThan(Vector left, uint right); // svcmpgt[_n_u32] + /// public static unsafe Vector CompareGreaterThan(Vector left, ulong right); // svcmpgt[_n_u64] + /// public static unsafe Vector CompareGreaterThan(Vector left, long right); // svcmpgt_wide[_n_s8] + /// public static unsafe Vector CompareGreaterThan(Vector left, long right); // svcmpgt_wide[_n_s16] + /// public static unsafe Vector CompareGreaterThan(Vector left, long right); // svcmpgt_wide[_n_s32] + /// public static unsafe Vector CompareGreaterThan(Vector left, ulong right); // svcmpgt_wide[_n_u8] + /// public static unsafe Vector CompareGreaterThan(Vector left, ulong right); // svcmpgt_wide[_n_u16] + /// public static unsafe Vector CompareGreaterThan(Vector left, ulong right); // svcmpgt_wide[_n_u32] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, float right); // svcmpge[_n_f32] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, double right); // svcmpge[_n_f64] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, sbyte right); // svcmpge[_n_s8] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, short right); // svcmpge[_n_s16] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, int right); // svcmpge[_n_s32] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, long right); // svcmpge[_n_s64] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, byte right); // svcmpge[_n_u8] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, ushort right); // svcmpge[_n_u16] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, uint right); // svcmpge[_n_u32] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, ulong right); // svcmpge[_n_u64] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, long right); // svcmpge_wide[_n_s8] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, long right); // svcmpge_wide[_n_s16] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, long right); // svcmpge_wide[_n_s32] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, ulong right); // svcmpge_wide[_n_u8] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, ulong right); // svcmpge_wide[_n_u16] + /// public static unsafe Vector CompareGreaterThanOrEqual(Vector left, ulong right); // svcmpge_wide[_n_u32] + /// public static unsafe Vector CompareLessThan(Vector left, float right); // svcmplt[_n_f32] + /// public static unsafe Vector CompareLessThan(Vector left, double right); // svcmplt[_n_f64] + /// public static unsafe Vector CompareLessThan(Vector left, sbyte right); // svcmplt[_n_s8] + /// public static unsafe Vector CompareLessThan(Vector left, short right); // svcmplt[_n_s16] + /// public static unsafe Vector CompareLessThan(Vector left, int right); // svcmplt[_n_s32] + /// public static unsafe Vector CompareLessThan(Vector left, long right); // svcmplt[_n_s64] + /// public static 
unsafe Vector CompareLessThan(Vector left, byte right); // svcmplt[_n_u8] + /// public static unsafe Vector CompareLessThan(Vector left, ushort right); // svcmplt[_n_u16] + /// public static unsafe Vector CompareLessThan(Vector left, uint right); // svcmplt[_n_u32] + /// public static unsafe Vector CompareLessThan(Vector left, ulong right); // svcmplt[_n_u64] + /// public static unsafe Vector CompareLessThan(Vector left, long right); // svcmplt_wide[_n_s8] + /// public static unsafe Vector CompareLessThan(Vector left, long right); // svcmplt_wide[_n_s16] + /// public static unsafe Vector CompareLessThan(Vector left, long right); // svcmplt_wide[_n_s32] + /// public static unsafe Vector CompareLessThan(Vector left, ulong right); // svcmplt_wide[_n_u8] + /// public static unsafe Vector CompareLessThan(Vector left, ulong right); // svcmplt_wide[_n_u16] + /// public static unsafe Vector CompareLessThan(Vector left, ulong right); // svcmplt_wide[_n_u32] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, float right); // svcmple[_n_f32] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, double right); // svcmple[_n_f64] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, sbyte right); // svcmple[_n_s8] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, short right); // svcmple[_n_s16] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, int right); // svcmple[_n_s32] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, long right); // svcmple[_n_s64] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, byte right); // svcmple[_n_u8] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, ushort right); // svcmple[_n_u16] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, uint right); // svcmple[_n_u32] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, ulong right); // svcmple[_n_u64] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, long right); // svcmple_wide[_n_s8] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, long right); // svcmple_wide[_n_s16] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, long right); // svcmple_wide[_n_s32] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, ulong right); // svcmple_wide[_n_u8] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, ulong right); // svcmple_wide[_n_u16] + /// public static unsafe Vector CompareLessThanOrEqual(Vector left, ulong right); // svcmple_wide[_n_u32] + /// public static unsafe Vector CompareNotEqualTo(Vector left, float right); // svcmpne[_n_f32] + /// public static unsafe Vector CompareNotEqualTo(Vector left, double right); // svcmpne[_n_f64] + /// public static unsafe Vector CompareNotEqualTo(Vector left, sbyte right); // svcmpne[_n_s8] + /// public static unsafe Vector CompareNotEqualTo(Vector left, short right); // svcmpne[_n_s16] + /// public static unsafe Vector CompareNotEqualTo(Vector left, int right); // svcmpne[_n_s32] + /// public static unsafe Vector CompareNotEqualTo(Vector left, long right); // svcmpne[_n_s64] + /// public static unsafe Vector CompareNotEqualTo(Vector left, byte right); // svcmpne[_n_u8] + /// public static unsafe Vector CompareNotEqualTo(Vector left, ushort right); // svcmpne[_n_u16] + /// public static unsafe Vector CompareNotEqualTo(Vector left, uint right); // svcmpne[_n_u32] + /// public static unsafe Vector 
CompareNotEqualTo(Vector left, ulong right); // svcmpne[_n_u64] + /// public static unsafe Vector CompareNotEqualTo(Vector left, long right); // svcmpne_wide[_n_s8] + /// public static unsafe Vector CompareNotEqualTo(Vector left, long right); // svcmpne_wide[_n_s16] + /// public static unsafe Vector CompareNotEqualTo(Vector left, long right); // svcmpne_wide[_n_s32] + /// public static unsafe Vector CompareUnordered(Vector left, float right); // svcmpuo[_n_f32] + /// public static unsafe Vector CompareUnordered(Vector left, double right); // svcmpuo[_n_f64] + /// public static unsafe float ConditionalExtractAfterLastActiveElement(Vector mask, float defaultValue, Vector data); // svclasta[_n_f32] + /// public static unsafe double ConditionalExtractAfterLastActiveElement(Vector mask, double defaultValue, Vector data); // svclasta[_n_f64] + /// public static unsafe sbyte ConditionalExtractAfterLastActiveElement(Vector mask, sbyte defaultValue, Vector data); // svclasta[_n_s8] + /// public static unsafe short ConditionalExtractAfterLastActiveElement(Vector mask, short defaultValue, Vector data); // svclasta[_n_s16] + /// public static unsafe int ConditionalExtractAfterLastActiveElement(Vector mask, int defaultValue, Vector data); // svclasta[_n_s32] + /// public static unsafe long ConditionalExtractAfterLastActiveElement(Vector mask, long defaultValue, Vector data); // svclasta[_n_s64] + /// public static unsafe byte ConditionalExtractAfterLastActiveElement(Vector mask, byte defaultValue, Vector data); // svclasta[_n_u8] + /// public static unsafe ushort ConditionalExtractAfterLastActiveElement(Vector mask, ushort defaultValue, Vector data); // svclasta[_n_u16] + /// public static unsafe uint ConditionalExtractAfterLastActiveElement(Vector mask, uint defaultValue, Vector data); // svclasta[_n_u32] + /// public static unsafe ulong ConditionalExtractAfterLastActiveElement(Vector mask, ulong defaultValue, Vector data); // svclasta[_n_u64] + /// public static unsafe float ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, float defaultScalar, Vector data); // svclasta[_n_f32] + /// public static unsafe double ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, double defaultScalar, Vector data); // svclasta[_n_f64] + /// public static unsafe sbyte ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, sbyte defaultScalar, Vector data); // svclasta[_n_s8] + /// public static unsafe short ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, short defaultScalar, Vector data); // svclasta[_n_s16] + /// public static unsafe int ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, int defaultScalar, Vector data); // svclasta[_n_s32] + /// public static unsafe long ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, long defaultScalar, Vector data); // svclasta[_n_s64] + /// public static unsafe byte ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, byte defaultScalar, Vector data); // svclasta[_n_u8] + /// public static unsafe ushort ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, ushort defaultScalar, Vector data); // svclasta[_n_u16] + /// public static unsafe uint ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, uint defaultScalar, Vector data); // svclasta[_n_u32] + /// public static unsafe ulong ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, ulong defaultScalar, Vector data); // svclasta[_n_u64] + /// public static unsafe float 
ConditionalExtractLastActiveElement(Vector mask, float defaultValue, Vector data); // svclastb[_n_f32]
+ /// public static unsafe double ConditionalExtractLastActiveElement(Vector mask, double defaultValue, Vector data); // svclastb[_n_f64]
+ /// public static unsafe sbyte ConditionalExtractLastActiveElement(Vector mask, sbyte defaultValue, Vector data); // svclastb[_n_s8]
+ /// public static unsafe short ConditionalExtractLastActiveElement(Vector mask, short defaultValue, Vector data); // svclastb[_n_s16]
+ /// public static unsafe int ConditionalExtractLastActiveElement(Vector mask, int defaultValue, Vector data); // svclastb[_n_s32]
+ /// public static unsafe long ConditionalExtractLastActiveElement(Vector mask, long defaultValue, Vector data); // svclastb[_n_s64]
+ /// public static unsafe byte ConditionalExtractLastActiveElement(Vector mask, byte defaultValue, Vector data); // svclastb[_n_u8]
+ /// public static unsafe ushort ConditionalExtractLastActiveElement(Vector mask, ushort defaultValue, Vector data); // svclastb[_n_u16]
+ /// public static unsafe uint ConditionalExtractLastActiveElement(Vector mask, uint defaultValue, Vector data); // svclastb[_n_u32]
+ /// public static unsafe ulong ConditionalExtractLastActiveElement(Vector mask, ulong defaultValue, Vector data); // svclastb[_n_u64]
+ /// public static unsafe float ConditionalExtractLastActiveElementAndReplicate(Vector mask, float fallback, Vector data); // svclastb[_n_f32]
+ /// public static unsafe double ConditionalExtractLastActiveElementAndReplicate(Vector mask, double fallback, Vector data); // svclastb[_n_f64]
+ /// public static unsafe sbyte ConditionalExtractLastActiveElementAndReplicate(Vector mask, sbyte fallback, Vector data); // svclastb[_n_s8]
+ /// public static unsafe short ConditionalExtractLastActiveElementAndReplicate(Vector mask, short fallback, Vector data); // svclastb[_n_s16]
+ /// public static unsafe int ConditionalExtractLastActiveElementAndReplicate(Vector mask, int fallback, Vector data); // svclastb[_n_s32]
+ /// public static unsafe long ConditionalExtractLastActiveElementAndReplicate(Vector mask, long fallback, Vector data); // svclastb[_n_s64]
+ /// public static unsafe byte ConditionalExtractLastActiveElementAndReplicate(Vector mask, byte fallback, Vector data); // svclastb[_n_u8]
+ /// public static unsafe ushort ConditionalExtractLastActiveElementAndReplicate(Vector mask, ushort fallback, Vector data); // svclastb[_n_u16]
+ /// public static unsafe uint ConditionalExtractLastActiveElementAndReplicate(Vector mask, uint fallback, Vector data); // svclastb[_n_u32]
+ /// public static unsafe ulong ConditionalExtractLastActiveElementAndReplicate(Vector mask, ulong fallback, Vector data); // svclastb[_n_u64]
+ /// Total Maybe: 140
+
+ /// Rejected:
+ /// public static unsafe Vector CreateTrueMaskByte(); // svptrue_b8
+ /// public static unsafe Vector CreateTrueMaskDouble(); // svptrue_b8
+ /// public static unsafe Vector CreateTrueMaskInt16(); // svptrue_b8
+ /// public static unsafe Vector CreateTrueMaskInt32(); // svptrue_b8
+ /// public static unsafe Vector CreateTrueMaskInt64(); // svptrue_b8
+ /// public static unsafe Vector CreateTrueMaskSByte(); // svptrue_b8
+ /// public static unsafe Vector CreateTrueMaskSingle(); // svptrue_b8
+ /// public static unsafe Vector CreateTrueMaskUInt16(); // svptrue_b16
+ /// public static unsafe Vector CreateTrueMaskUInt32(); // svptrue_b32
+ /// public static unsafe Vector CreateTrueMaskUInt64(); // svptrue_b64
+ /// Total Rejected: 10
+
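+ /// Example (illustrative sketch): the optional [_n] entries above would accept a scalar
+ /// right-hand side directly; without them the scalar has to be broadcast first. Both
+ /// spellings could lower to the same compare-with-immediate when the encoding allows:
+ ///
+ ///     Vector<int> viaVector = Sve.CompareEqual(data, new Vector<int>(42)); // always available
+ ///     Vector<int> viaScalar = Sve.CompareEqual(data, 42);                  // optional [_n] form
+
+ /// Total ACLE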
covered across API: 548 + diff --git a/sve_api/out_api/apiraw_FEAT_SVE__maths.cs b/sve_api/out_api/apiraw_FEAT_SVE__maths.cs new file mode 100644 index 0000000000000..dc1ecf48d0255 --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE__maths.cs @@ -0,0 +1,1324 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: maths +{ + + /// T: float, double, sbyte, short, int, long + public static unsafe Vector Abs(Vector value); // FABS or ABS // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); // FABD or SABD or UABD // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Add(Vector left, Vector right); // FADD or ADD // predicated, MOVPRFX + + /// T: float, double, long, ulong + public static unsafe Vector AddAcross(Vector value); // FADDV or UADDV // predicated + + /// T: [long, sbyte], [long, short], [long, int], [ulong, byte], [ulong, ushort], [ulong, uint] + public static unsafe Vector AddAcross(Vector value); // SADDV or UADDV // predicated + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AddSaturate(Vector left, Vector right); // SQADD or UQADD + + /// T: float, double, int, long, uint, ulong + public static unsafe Vector Divide(Vector left, Vector right); // FDIV or SDIV or UDIV or FDIVR or SDIVR or UDIVR // predicated, MOVPRFX + + /// T: [int, sbyte], [long, short], [uint, byte], [ulong, ushort] + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right); // SDOT or UDOT // MOVPRFX + + /// T: [int, sbyte], [long, short], [uint, byte], [ulong, ushort] + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SDOT or UDOT // MOVPRFX + + /// T: float, double + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right); // FMLA or FMAD // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // FMLA // MOVPRFX + + /// T: float, double + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right); // FNMLA or FNMAD // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right); // FMLS or FMSB // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // FMLS // MOVPRFX + + /// T: float, double + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right); // FNMLS or FNMSB // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Max(Vector left, Vector right); // FMAX or SMAX or UMAX // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector MaxAcross(Vector value); // FMAXV or SMAXV or UMAXV // predicated + + /// T: float, double + public static unsafe Vector MaxNumber(Vector left, Vector right); // FMAXNM // predicated, MOVPRFX + + /// T: float, double + public static unsafe 
Vector MaxNumberAcross(Vector value); // FMAXNMV // predicated
+
+ /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector Min(Vector left, Vector right); // FMIN or SMIN or UMIN // predicated, MOVPRFX
+
+ /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector MinAcross(Vector value); // FMINV or SMINV or UMINV // predicated
+
+ /// T: float, double
+ public static unsafe Vector MinNumber(Vector left, Vector right); // FMINNM // predicated, MOVPRFX
+
+ /// T: float, double
+ public static unsafe Vector MinNumberAcross(Vector value); // FMINNMV // predicated
+
+ /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector Multiply(Vector left, Vector right); // FMUL or MUL // predicated, MOVPRFX
+
+ /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right); // MLA or MAD // predicated, MOVPRFX
+
+ /// T: float, double
+ public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); // FMUL
+
+ /// T: float, double
+ public static unsafe Vector MultiplyExtended(Vector left, Vector right); // FMULX // predicated, MOVPRFX
+
+ /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right); // MLS or MSB // predicated, MOVPRFX
+
+ /// T: float, double, sbyte, short, int, long
+ public static unsafe Vector Negate(Vector value); // FNEG or NEG // predicated, MOVPRFX
+
+ /// T: int, long
+ public static unsafe Vector SignExtend16(Vector value); // SXTH // predicated, MOVPRFX
+
+ public static unsafe Vector SignExtend32(Vector value); // SXTW // predicated, MOVPRFX
+
+ /// T: short, int, long
+ public static unsafe Vector SignExtend8(Vector value); // SXTB // predicated, MOVPRFX
+
+ /// T: [short, sbyte], [int, short], [long, int]
+ public static unsafe Vector SignExtendWideningLower(Vector value); // SUNPKLO
+
+ /// T: [short, sbyte], [int, short], [long, int]
+ public static unsafe Vector SignExtendWideningUpper(Vector value); // SUNPKHI
+
+ /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector Subtract(Vector left, Vector right); // FSUB or SUB or FSUBR or SUBR // predicated, MOVPRFX
+
+ /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector SubtractSaturate(Vector left, Vector right); // SQSUB or UQSUB
+
+ /// T: uint, ulong
+ public static unsafe Vector ZeroExtend16(Vector value); // UXTH or AND // predicated, MOVPRFX
+
+ public static unsafe Vector ZeroExtend32(Vector value); // UXTW or AND // predicated, MOVPRFX
+
+ /// T: ushort, uint, ulong
+ public static unsafe Vector ZeroExtend8(Vector value); // UXTB or AND // predicated, MOVPRFX
+
+ /// T: [ushort, byte], [uint, ushort], [ulong, uint]
+ public static unsafe Vector ZeroExtendWideningLower(Vector value); // UUNPKLO or PUNPKLO
+
+ /// T: [ushort, byte], [uint, ushort], [ulong, uint]
+ public static unsafe Vector ZeroExtendWideningUpper(Vector value); // UUNPKHI or PUNPKHI
+
+ /// total method signatures: 41
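+
+
+ /// Example (illustrative sketch): typical composition of the entries above, assuming the
+ /// Vector<T> shapes used throughout; FusedMultiplyAdd computes addend + left * right
+ /// (FMLA), and DotProduct widens groups of four narrow lanes into each accumulator lane (SDOT/UDOT):
+ ///
+ ///     Vector<float> acc = Sve.FusedMultiplyAdd(acc0, x, y);      // acc0 + x * y
+ ///     Vector<int>   dot = Sve.DotProduct(sums, aBytes, bBytes);  // sums + dot4 per lane
+
+
+ /// Optional Entries:
+
+ /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector MultiplyReturningHighHalf(Vector left, Vector right); // SMULH or UMULH // predicated, MOVPRFX
+
+ /// T: sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe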
+
+
+/// Full API
+public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE  Category: maths
+{
+  /// Abs : Absolute value
+
+  /// svfloat32_t svabs[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) : "FABS Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FABS Zresult.S, Pg/M, Zop.S"
+  /// svfloat32_t svabs[_f32]_x(svbool_t pg, svfloat32_t op) : "FABS Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FABS Zresult.S, Pg/M, Zop.S"
+  /// svfloat32_t svabs[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FABS Zresult.S, Pg/M, Zop.S"
+  public static unsafe Vector<float> Abs(Vector<float> value);
+
+  /// svfloat64_t svabs[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) : "FABS Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FABS Zresult.D, Pg/M, Zop.D"
+  /// svfloat64_t svabs[_f64]_x(svbool_t pg, svfloat64_t op) : "FABS Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FABS Zresult.D, Pg/M, Zop.D"
+  /// svfloat64_t svabs[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FABS Zresult.D, Pg/M, Zop.D"
+  public static unsafe Vector<double> Abs(Vector<double> value);
+
+  /// svint8_t svabs[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) : "ABS Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; ABS Zresult.B, Pg/M, Zop.B"
+  /// svint8_t svabs[_s8]_x(svbool_t pg, svint8_t op) : "ABS Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; ABS Zresult.B, Pg/M, Zop.B"
+  /// svint8_t svabs[_s8]_z(svbool_t pg, svint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; ABS Zresult.B, Pg/M, Zop.B"
+  public static unsafe Vector<sbyte> Abs(Vector<sbyte> value);
+
+  /// svint16_t svabs[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) : "ABS Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; ABS Zresult.H, Pg/M, Zop.H"
+  /// svint16_t svabs[_s16]_x(svbool_t pg, svint16_t op) : "ABS Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; ABS Zresult.H, Pg/M, Zop.H"
+  /// svint16_t svabs[_s16]_z(svbool_t pg, svint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; ABS Zresult.H, Pg/M, Zop.H"
+  public static unsafe Vector<short> Abs(Vector<short> value);
+
+  /// svint32_t svabs[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) : "ABS Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; ABS Zresult.S, Pg/M, Zop.S"
+  /// svint32_t svabs[_s32]_x(svbool_t pg, svint32_t op) : "ABS Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; ABS Zresult.S, Pg/M, Zop.S"
+  /// svint32_t svabs[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; ABS Zresult.S, Pg/M, Zop.S"
+  public static unsafe Vector<int> Abs(Vector<int> value);
+
+  /// svint64_t svabs[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) : "ABS Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; ABS Zresult.D, Pg/M, Zop.D"
+  /// svint64_t svabs[_s64]_x(svbool_t pg, svint64_t op) : "ABS Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; ABS Zresult.D, Pg/M, Zop.D"
+  /// svint64_t svabs[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; ABS Zresult.D, Pg/M, Zop.D"
+  public static unsafe Vector<long> Abs(Vector<long> value);
+
+
+  /// AbsoluteDifference : Absolute difference
+
+  /// svfloat32_t svabd[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FABD Zresult.S, Pg/M, Zresult.S, Zop2.S"
+  /// svfloat32_t svabd[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "FABD
Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; FABD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svabd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FABD Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; FABD Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); + + /// svfloat64_t svabd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FABD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FABD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svabd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FABD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "FABD Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; FABD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svabd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FABD Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; FABD Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); + + /// svint8_t svabd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SABD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SABD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svabd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SABD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "SABD Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; SABD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svabd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SABD Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; SABD Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); + + /// svint16_t svabd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SABD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SABD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svabd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SABD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "SABD Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; SABD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svabd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SABD Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; SABD Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); + + /// svint32_t svabd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SABD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svabd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SABD Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; SABD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svabd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SABD Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SABD Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); + + /// svint64_t svabd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SABD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SABD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svabd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SABD 
Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SABD Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; SABD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svabd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SABD Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SABD Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); + + /// svuint8_t svabd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UABD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UABD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svabd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UABD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "UABD Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; UABD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svabd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; UABD Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; UABD Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); + + /// svuint16_t svabd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UABD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UABD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svabd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UABD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "UABD Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; UABD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svabd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; UABD Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; UABD Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); + + /// svuint32_t svabd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UABD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svabd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "UABD Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; UABD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svabd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; UABD Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; UABD Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); + + /// svuint64_t svabd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UABD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UABD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svabd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UABD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "UABD Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; UABD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svabd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; UABD Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; UABD Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); + + + /// Add : Add + + /// svfloat32_t svadd[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// 
svfloat32_t svadd[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "FADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "FADD Zresult.S, Zop1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svadd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FADD Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; FADD Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Add(Vector left, Vector right); + + /// svfloat64_t svadd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svadd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "FADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "FADD Zresult.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svadd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FADD Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; FADD Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Add(Vector left, Vector right); + + /// svint8_t svadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "ADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; ADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "ADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "ADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "ADD Zresult.B, Zop1.B, Zop2.B" + /// svint8_t svadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; ADD Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; ADD Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector Add(Vector left, Vector right); + + /// svint16_t svadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "ADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; ADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "ADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "ADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "ADD Zresult.H, Zop1.H, Zop2.H" + /// svint16_t svadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; ADD Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; ADD Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Add(Vector left, Vector right); + + /// svint32_t svadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "ADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; ADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "ADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "ADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "ADD Zresult.S, Zop1.S, Zop2.S" + /// svint32_t svadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; ADD Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; ADD Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Add(Vector left, Vector right); + + /// svint64_t svadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "ADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; ADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "ADD 
Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "ADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "ADD Zresult.D, Zop1.D, Zop2.D" + /// svint64_t svadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; ADD Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; ADD Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Add(Vector left, Vector right); + + /// svuint8_t svadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "ADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; ADD Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "ADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "ADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "ADD Zresult.B, Zop1.B, Zop2.B" + /// svuint8_t svadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; ADD Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; ADD Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector Add(Vector left, Vector right); + + /// svuint16_t svadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "ADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; ADD Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "ADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "ADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "ADD Zresult.H, Zop1.H, Zop2.H" + /// svuint16_t svadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; ADD Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; ADD Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Add(Vector left, Vector right); + + /// svuint32_t svadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "ADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; ADD Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "ADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "ADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "ADD Zresult.S, Zop1.S, Zop2.S" + /// svuint32_t svadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; ADD Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; ADD Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Add(Vector left, Vector right); + + /// svuint64_t svadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "ADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; ADD Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "ADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "ADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "ADD Zresult.D, Zop1.D, Zop2.D" + /// svuint64_t svadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; ADD Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; ADD Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Add(Vector left, Vector right); + + + /// AddAcross : Add reduction + + /// float32_t svaddv[_f32](svbool_t pg, svfloat32_t op) : "FADDV Sresult, Pg, Zop.S" + public static unsafe Vector AddAcross(Vector value); + + /// float64_t svaddv[_f64](svbool_t pg, svfloat64_t op) : "FADDV Dresult, Pg, Zop.D" + public static unsafe Vector AddAcross(Vector value); + + /// int64_t svaddv[_s8](svbool_t pg, svint8_t op) : "SADDV Dresult, Pg, Zop.B" + public static unsafe Vector AddAcross(Vector value); + + /// int64_t 
svaddv[_s16](svbool_t pg, svint16_t op) : "SADDV Dresult, Pg, Zop.H"
+  public static unsafe Vector<long> AddAcross(Vector<short> value);
+
+  /// int64_t svaddv[_s32](svbool_t pg, svint32_t op) : "SADDV Dresult, Pg, Zop.S"
+  public static unsafe Vector<long> AddAcross(Vector<int> value);
+
+  /// int64_t svaddv[_s64](svbool_t pg, svint64_t op) : "UADDV Dresult, Pg, Zop.D"
+  public static unsafe Vector<long> AddAcross(Vector<long> value);
+
+  /// uint64_t svaddv[_u8](svbool_t pg, svuint8_t op) : "UADDV Dresult, Pg, Zop.B"
+  public static unsafe Vector<ulong> AddAcross(Vector<byte> value);
+
+  /// uint64_t svaddv[_u16](svbool_t pg, svuint16_t op) : "UADDV Dresult, Pg, Zop.H"
+  public static unsafe Vector<ulong> AddAcross(Vector<ushort> value);
+
+  /// uint64_t svaddv[_u32](svbool_t pg, svuint32_t op) : "UADDV Dresult, Pg, Zop.S"
+  public static unsafe Vector<ulong> AddAcross(Vector<uint> value);
+
+  /// uint64_t svaddv[_u64](svbool_t pg, svuint64_t op) : "UADDV Dresult, Pg, Zop.D"
+  public static unsafe Vector<ulong> AddAcross(Vector<ulong> value);
+
+
+  /// AddSaturate : Saturating add
+
+  /// svint8_t svqadd[_s8](svint8_t op1, svint8_t op2) : "SQADD Zresult.B, Zop1.B, Zop2.B"
+  public static unsafe Vector<sbyte> AddSaturate(Vector<sbyte> left, Vector<sbyte> right);
+
+  /// svint16_t svqadd[_s16](svint16_t op1, svint16_t op2) : "SQADD Zresult.H, Zop1.H, Zop2.H"
+  public static unsafe Vector<short> AddSaturate(Vector<short> left, Vector<short> right);
+
+  /// svint32_t svqadd[_s32](svint32_t op1, svint32_t op2) : "SQADD Zresult.S, Zop1.S, Zop2.S"
+  public static unsafe Vector<int> AddSaturate(Vector<int> left, Vector<int> right);
+
+  /// svint64_t svqadd[_s64](svint64_t op1, svint64_t op2) : "SQADD Zresult.D, Zop1.D, Zop2.D"
+  public static unsafe Vector<long> AddSaturate(Vector<long> left, Vector<long> right);
+
+  /// svuint8_t svqadd[_u8](svuint8_t op1, svuint8_t op2) : "UQADD Zresult.B, Zop1.B, Zop2.B"
+  public static unsafe Vector<byte> AddSaturate(Vector<byte> left, Vector<byte> right);
+
+  /// svuint16_t svqadd[_u16](svuint16_t op1, svuint16_t op2) : "UQADD Zresult.H, Zop1.H, Zop2.H"
+  public static unsafe Vector<ushort> AddSaturate(Vector<ushort> left, Vector<ushort> right);
+
+  /// svuint32_t svqadd[_u32](svuint32_t op1, svuint32_t op2) : "UQADD Zresult.S, Zop1.S, Zop2.S"
+  public static unsafe Vector<uint> AddSaturate(Vector<uint> left, Vector<uint> right);
+
+  /// svuint64_t svqadd[_u64](svuint64_t op1, svuint64_t op2) : "UQADD Zresult.D, Zop1.D, Zop2.D"
+  public static unsafe Vector<ulong> AddSaturate(Vector<ulong> left, Vector<ulong> right);
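+
+  /// Illustrative sketch (not part of the proposed surface): AddSaturate clamps at the type
+  /// bounds where Add wraps. With every sbyte lane holding 120 and 100:
+  ///
+  ///   var a = new Vector<sbyte>((sbyte)120);
+  ///   var b = new Vector<sbyte>((sbyte)100);
+  ///   Vector<sbyte> clamped = Sve.AddSaturate(a, b); // each lane == 127 (sbyte.MaxValue)
+  ///   Vector<sbyte> wrapped = Sve.Add(a, b);         // each lane == -36 (220 wraps mod 256)
+
+
+  /// Divide : Divide
+
+  /// svfloat32_t svdiv[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FDIV Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FDIV Zresult.S, Pg/M, Zresult.S, Zop2.S"
+  /// svfloat32_t svdiv[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FDIV Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "FDIVR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; FDIV Zresult.S, Pg/M, Zresult.S, Zop2.S"
+  /// svfloat32_t svdiv[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FDIV Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; FDIVR Zresult.S, Pg/M, Zresult.S, Zop1.S"
+  public static unsafe Vector<float> Divide(Vector<float> left, Vector<float> right);
+
+  /// svfloat64_t svdiv[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FDIV Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FDIV Zresult.D, Pg/M, Zresult.D, Zop2.D"
+  /// svfloat64_t svdiv[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FDIV Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "FDIVR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; FDIV Zresult.D, Pg/M, Zresult.D, Zop2.D"
+  /// svfloat64_t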
svdiv[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FDIV Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; FDIVR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Divide(Vector left, Vector right); + + /// svint32_t svdiv[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SDIV Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SDIV Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svdiv[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SDIV Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SDIVR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; SDIV Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svdiv[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SDIV Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SDIVR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Divide(Vector left, Vector right); + + /// svint64_t svdiv[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SDIV Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SDIV Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svdiv[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SDIV Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SDIVR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; SDIV Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svdiv[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SDIV Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SDIVR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Divide(Vector left, Vector right); + + /// svuint32_t svdiv[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UDIV Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UDIV Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svdiv[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UDIV Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "UDIVR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; UDIV Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svdiv[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; UDIV Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; UDIVR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Divide(Vector left, Vector right); + + /// svuint64_t svdiv[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UDIV Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UDIV Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svdiv[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UDIV Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "UDIVR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; UDIV Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svdiv[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; UDIV Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; UDIVR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Divide(Vector left, Vector right); + + + /// DotProduct : Dot product + + /// svint32_t svdot[_s32](svint32_t op1, svint8_t op2, svint8_t op3) : "SDOT Ztied1.S, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; SDOT Zresult.S, Zop2.B, Zop3.B" + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right); + + /// svint64_t svdot[_s64](svint64_t op1, svint16_t op2, svint16_t op3) : "SDOT Ztied1.D, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; SDOT 
Zresult.D, Zop2.H, Zop3.H" + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right); + + /// svuint32_t svdot[_u32](svuint32_t op1, svuint8_t op2, svuint8_t op3) : "UDOT Ztied1.S, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; UDOT Zresult.S, Zop2.B, Zop3.B" + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right); + + /// svuint64_t svdot[_u64](svuint64_t op1, svuint16_t op2, svuint16_t op3) : "UDOT Ztied1.D, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; UDOT Zresult.D, Zop2.H, Zop3.H" + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right); + + + /// DotProductBySelectedScalar : Dot product + + /// svint32_t svdot_lane[_s32](svint32_t op1, svint8_t op2, svint8_t op3, uint64_t imm_index) : "SDOT Ztied1.S, Zop2.B, Zop3.B[imm_index]" or "MOVPRFX Zresult, Zop1; SDOT Zresult.S, Zop2.B, Zop3.B[imm_index]" + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svint64_t svdot_lane[_s64](svint64_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) : "SDOT Ztied1.D, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; SDOT Zresult.D, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svuint32_t svdot_lane[_u32](svuint32_t op1, svuint8_t op2, svuint8_t op3, uint64_t imm_index) : "UDOT Ztied1.S, Zop2.B, Zop3.B[imm_index]" or "MOVPRFX Zresult, Zop1; UDOT Zresult.S, Zop2.B, Zop3.B[imm_index]" + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svuint64_t svdot_lane[_u64](svuint64_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) : "UDOT Ztied1.D, Zop2.H, Zop3.H[imm_index]" or "MOVPRFX Zresult, Zop1; UDOT Zresult.D, Zop2.H, Zop3.H[imm_index]" + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + + /// FusedMultiplyAdd : Multiply-add, addend first + + /// svfloat32_t svmla[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) : "FMLA Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; FMLA Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svfloat32_t svmla[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) : "FMLA Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "FMAD Ztied2.S, Pg/M, Zop3.S, Zop1.S" or "FMAD Ztied3.S, Pg/M, Zop2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; FMLA Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svfloat32_t svmla[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMLA Zresult.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMAD Zresult.S, Pg/M, Zop3.S, Zop1.S" or "MOVPRFX Zresult.S, Pg/Z, Zop3.S; FMAD Zresult.S, Pg/M, Zop2.S, Zop1.S" + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right); + + /// svfloat64_t svmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) : "FMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; FMLA Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svfloat64_t svmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) : "FMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "FMAD Ztied2.D, Pg/M, Zop3.D, Zop1.D" or "FMAD Ztied3.D, Pg/M, Zop2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; FMLA Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svfloat64_t 
svmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMLA Zresult.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMAD Zresult.D, Pg/M, Zop3.D, Zop1.D" or "MOVPRFX Zresult.D, Pg/Z, Zop3.D; FMAD Zresult.D, Pg/M, Zop2.D, Zop1.D" + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right); + + + /// FusedMultiplyAddBySelectedScalar : Multiply-add, addend first + + /// svfloat32_t svmla_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index) : "FMLA Ztied1.S, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; FMLA Zresult.S, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svfloat64_t svmla_lane[_f64](svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_index) : "FMLA Ztied1.D, Zop2.D, Zop3.D[imm_index]" or "MOVPRFX Zresult, Zop1; FMLA Zresult.D, Zop2.D, Zop3.D[imm_index]" + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + + /// FusedMultiplyAddNegated : Negated multiply-add, addend first + + /// svfloat32_t svnmla[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) : "FNMLA Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; FNMLA Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svfloat32_t svnmla[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) : "FNMLA Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "FNMAD Ztied2.S, Pg/M, Zop3.S, Zop1.S" or "FNMAD Ztied3.S, Pg/M, Zop2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; FNMLA Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svfloat32_t svnmla[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FNMLA Zresult.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; FNMAD Zresult.S, Pg/M, Zop3.S, Zop1.S" or "MOVPRFX Zresult.S, Pg/Z, Zop3.S; FNMAD Zresult.S, Pg/M, Zop2.S, Zop1.S" + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right); + + /// svfloat64_t svnmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) : "FNMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; FNMLA Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svfloat64_t svnmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) : "FNMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "FNMAD Ztied2.D, Pg/M, Zop3.D, Zop1.D" or "FNMAD Ztied3.D, Pg/M, Zop2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; FNMLA Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svfloat64_t svnmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FNMLA Zresult.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; FNMAD Zresult.D, Pg/M, Zop3.D, Zop1.D" or "MOVPRFX Zresult.D, Pg/Z, Zop3.D; FNMAD Zresult.D, Pg/M, Zop2.D, Zop1.D" + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right); + + + /// FusedMultiplySubtract : Multiply-subtract, minuend first + + /// svfloat32_t svmls[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) : "FMLS Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; FMLS Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svfloat32_t svmls[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) : "FMLS Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "FMSB Ztied2.S, Pg/M, Zop3.S, Zop1.S" or "FMSB 
Ztied3.S, Pg/M, Zop2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; FMLS Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svfloat32_t svmls[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMLS Zresult.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMSB Zresult.S, Pg/M, Zop3.S, Zop1.S" or "MOVPRFX Zresult.S, Pg/Z, Zop3.S; FMSB Zresult.S, Pg/M, Zop2.S, Zop1.S" + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right); + + /// svfloat64_t svmls[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) : "FMLS Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; FMLS Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svfloat64_t svmls[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) : "FMLS Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "FMSB Ztied2.D, Pg/M, Zop3.D, Zop1.D" or "FMSB Ztied3.D, Pg/M, Zop2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; FMLS Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svfloat64_t svmls[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMLS Zresult.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMSB Zresult.D, Pg/M, Zop3.D, Zop1.D" or "MOVPRFX Zresult.D, Pg/Z, Zop3.D; FMSB Zresult.D, Pg/M, Zop2.D, Zop1.D" + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right); + + + /// FusedMultiplySubtractBySelectedScalar : Multiply-subtract, minuend first + + /// svfloat32_t svmls_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index) : "FMLS Ztied1.S, Zop2.S, Zop3.S[imm_index]" or "MOVPRFX Zresult, Zop1; FMLS Zresult.S, Zop2.S, Zop3.S[imm_index]" + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svfloat64_t svmls_lane[_f64](svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_index) : "FMLS Ztied1.D, Zop2.D, Zop3.D[imm_index]" or "MOVPRFX Zresult, Zop1; FMLS Zresult.D, Zop2.D, Zop3.D[imm_index]" + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + + /// FusedMultiplySubtractNegated : Negated multiply-subtract, minuend first + + /// svfloat32_t svnmls[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) : "FNMLS Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; FNMLS Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svfloat32_t svnmls[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) : "FNMLS Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "FNMSB Ztied2.S, Pg/M, Zop3.S, Zop1.S" or "FNMSB Ztied3.S, Pg/M, Zop2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; FNMLS Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svfloat32_t svnmls[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FNMLS Zresult.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; FNMSB Zresult.S, Pg/M, Zop3.S, Zop1.S" or "MOVPRFX Zresult.S, Pg/Z, Zop3.S; FNMSB Zresult.S, Pg/M, Zop2.S, Zop1.S" + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right); + + /// svfloat64_t svnmls[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) : "FNMLS Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; FNMLS Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svfloat64_t svnmls[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) : "FNMLS 
Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "FNMSB Ztied2.D, Pg/M, Zop3.D, Zop1.D" or "FNMSB Ztied3.D, Pg/M, Zop2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; FNMLS Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svfloat64_t svnmls[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FNMLS Zresult.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; FNMSB Zresult.D, Pg/M, Zop3.D, Zop1.D" or "MOVPRFX Zresult.D, Pg/Z, Zop3.D; FNMSB Zresult.D, Pg/M, Zop2.D, Zop1.D" + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right); + + + /// Max : Maximum + + /// svfloat32_t svmax[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMAX Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMAX Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svmax[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMAX Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "FMAX Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; FMAX Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svmax[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMAX Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMAX Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Max(Vector left, Vector right); + + /// svfloat64_t svmax[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMAX Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svmax[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "FMAX Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; FMAX Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svmax[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMAX Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMAX Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Max(Vector left, Vector right); + + /// svint8_t svmax[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SMAX Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SMAX Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svmax[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SMAX Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "SMAX Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; SMAX Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svmax[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SMAX Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; SMAX Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector Max(Vector left, Vector right); + + /// svint16_t svmax[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SMAX Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SMAX Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svmax[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SMAX Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "SMAX Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; SMAX Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svmax[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SMAX Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; SMAX Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Max(Vector left, Vector right); + + /// svint32_t svmax[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SMAX Ztied1.S, 
Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SMAX Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svmax[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SMAX Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SMAX Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; SMAX Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svmax[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SMAX Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SMAX Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Max(Vector left, Vector right); + + /// svint64_t svmax[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SMAX Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svmax[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SMAX Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; SMAX Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svmax[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SMAX Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SMAX Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Max(Vector left, Vector right); + + /// svuint8_t svmax[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UMAX Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UMAX Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svmax[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UMAX Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "UMAX Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; UMAX Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svmax[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; UMAX Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; UMAX Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector Max(Vector left, Vector right); + + /// svuint16_t svmax[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UMAX Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UMAX Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svmax[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UMAX Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "UMAX Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; UMAX Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svmax[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; UMAX Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; UMAX Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Max(Vector left, Vector right); + + /// svuint32_t svmax[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UMAX Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UMAX Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svmax[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UMAX Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "UMAX Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; UMAX Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svmax[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; UMAX Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; UMAX Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Max(Vector left, Vector right); + + /// svuint64_t svmax[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or 
"MOVPRFX Zresult, Zop1; UMAX Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svmax[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "UMAX Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; UMAX Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svmax[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; UMAX Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; UMAX Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Max(Vector left, Vector right); + + + /// MaxAcross : Maximum reduction to scalar + + /// float32_t svmaxv[_f32](svbool_t pg, svfloat32_t op) : "FMAXV Sresult, Pg, Zop.S" + public static unsafe Vector MaxAcross(Vector value); + + /// float64_t svmaxv[_f64](svbool_t pg, svfloat64_t op) : "FMAXV Dresult, Pg, Zop.D" + public static unsafe Vector MaxAcross(Vector value); + + /// int8_t svmaxv[_s8](svbool_t pg, svint8_t op) : "SMAXV Bresult, Pg, Zop.B" + public static unsafe Vector MaxAcross(Vector value); + + /// int16_t svmaxv[_s16](svbool_t pg, svint16_t op) : "SMAXV Hresult, Pg, Zop.H" + public static unsafe Vector MaxAcross(Vector value); + + /// int32_t svmaxv[_s32](svbool_t pg, svint32_t op) : "SMAXV Sresult, Pg, Zop.S" + public static unsafe Vector MaxAcross(Vector value); + + /// int64_t svmaxv[_s64](svbool_t pg, svint64_t op) : "SMAXV Dresult, Pg, Zop.D" + public static unsafe Vector MaxAcross(Vector value); + + /// uint8_t svmaxv[_u8](svbool_t pg, svuint8_t op) : "UMAXV Bresult, Pg, Zop.B" + public static unsafe Vector MaxAcross(Vector value); + + /// uint16_t svmaxv[_u16](svbool_t pg, svuint16_t op) : "UMAXV Hresult, Pg, Zop.H" + public static unsafe Vector MaxAcross(Vector value); + + /// uint32_t svmaxv[_u32](svbool_t pg, svuint32_t op) : "UMAXV Sresult, Pg, Zop.S" + public static unsafe Vector MaxAcross(Vector value); + + /// uint64_t svmaxv[_u64](svbool_t pg, svuint64_t op) : "UMAXV Dresult, Pg, Zop.D" + public static unsafe Vector MaxAcross(Vector value); + + + /// MaxNumber : Maximum number + + /// svfloat32_t svmaxnm[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMAXNM Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMAXNM Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svmaxnm[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMAXNM Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "FMAXNM Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; FMAXNM Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svmaxnm[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMAXNM Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMAXNM Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector MaxNumber(Vector left, Vector right); + + /// svfloat64_t svmaxnm[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMAXNM Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMAXNM Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svmaxnm[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMAXNM Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "FMAXNM Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; FMAXNM Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svmaxnm[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMAXNM Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMAXNM Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector MaxNumber(Vector 
left, Vector right); + + + /// MaxNumberAcross : Maximum number reduction to scalar + + /// float32_t svmaxnmv[_f32](svbool_t pg, svfloat32_t op) : "FMAXNMV Sresult, Pg, Zop.S" + public static unsafe Vector MaxNumberAcross(Vector value); + + /// float64_t svmaxnmv[_f64](svbool_t pg, svfloat64_t op) : "FMAXNMV Dresult, Pg, Zop.D" + public static unsafe Vector MaxNumberAcross(Vector value); + + + /// Min : Minimum + + /// svfloat32_t svmin[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMIN Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svmin[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "FMIN Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; FMIN Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svmin[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMIN Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMIN Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Min(Vector left, Vector right); + + /// svfloat64_t svmin[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMIN Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svmin[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "FMIN Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; FMIN Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svmin[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMIN Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMIN Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Min(Vector left, Vector right); + + /// svint8_t svmin[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SMIN Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SMIN Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svmin[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SMIN Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "SMIN Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; SMIN Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svmin[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SMIN Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; SMIN Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector Min(Vector left, Vector right); + + /// svint16_t svmin[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SMIN Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SMIN Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svmin[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SMIN Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "SMIN Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; SMIN Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svmin[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SMIN Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; SMIN Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Min(Vector left, Vector right); + + /// svint32_t svmin[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SMIN Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svmin[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SMIN 
Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; SMIN Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svmin[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SMIN Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SMIN Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Min(Vector left, Vector right); + + /// svint64_t svmin[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SMIN Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svmin[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SMIN Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; SMIN Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svmin[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SMIN Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SMIN Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Min(Vector left, Vector right); + + /// svuint8_t svmin[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UMIN Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; UMIN Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svmin[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "UMIN Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "UMIN Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; UMIN Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svmin[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; UMIN Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; UMIN Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector Min(Vector left, Vector right); + + /// svuint16_t svmin[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UMIN Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; UMIN Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svmin[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "UMIN Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "UMIN Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; UMIN Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svmin[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; UMIN Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; UMIN Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Min(Vector left, Vector right); + + /// svuint32_t svmin[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; UMIN Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svmin[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "UMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "UMIN Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; UMIN Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svmin[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; UMIN Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; UMIN Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Min(Vector left, Vector right); + + /// svuint64_t svmin[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; UMIN Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svmin[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "UMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "UMIN Ztied2.D, Pg/M, Ztied2.D, 
Zop1.D" or "MOVPRFX Zresult, Zop1; UMIN Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svmin[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; UMIN Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; UMIN Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Min(Vector left, Vector right); + + + /// MinAcross : Minimum reduction to scalar + + /// float32_t svminv[_f32](svbool_t pg, svfloat32_t op) : "FMINV Sresult, Pg, Zop.S" + public static unsafe Vector MinAcross(Vector value); + + /// float64_t svminv[_f64](svbool_t pg, svfloat64_t op) : "FMINV Dresult, Pg, Zop.D" + public static unsafe Vector MinAcross(Vector value); + + /// int8_t svminv[_s8](svbool_t pg, svint8_t op) : "SMINV Bresult, Pg, Zop.B" + public static unsafe Vector MinAcross(Vector value); + + /// int16_t svminv[_s16](svbool_t pg, svint16_t op) : "SMINV Hresult, Pg, Zop.H" + public static unsafe Vector MinAcross(Vector value); + + /// int32_t svminv[_s32](svbool_t pg, svint32_t op) : "SMINV Sresult, Pg, Zop.S" + public static unsafe Vector MinAcross(Vector value); + + /// int64_t svminv[_s64](svbool_t pg, svint64_t op) : "SMINV Dresult, Pg, Zop.D" + public static unsafe Vector MinAcross(Vector value); + + /// uint8_t svminv[_u8](svbool_t pg, svuint8_t op) : "UMINV Bresult, Pg, Zop.B" + public static unsafe Vector MinAcross(Vector value); + + /// uint16_t svminv[_u16](svbool_t pg, svuint16_t op) : "UMINV Hresult, Pg, Zop.H" + public static unsafe Vector MinAcross(Vector value); + + /// uint32_t svminv[_u32](svbool_t pg, svuint32_t op) : "UMINV Sresult, Pg, Zop.S" + public static unsafe Vector MinAcross(Vector value); + + /// uint64_t svminv[_u64](svbool_t pg, svuint64_t op) : "UMINV Dresult, Pg, Zop.D" + public static unsafe Vector MinAcross(Vector value); + + + /// MinNumber : Minimum number + + /// svfloat32_t svminnm[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMINNM Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMINNM Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svminnm[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMINNM Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "FMINNM Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; FMINNM Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svminnm[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMINNM Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMINNM Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector MinNumber(Vector left, Vector right); + + /// svfloat64_t svminnm[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMINNM Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMINNM Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svminnm[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMINNM Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "FMINNM Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; FMINNM Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svminnm[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMINNM Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMINNM Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector MinNumber(Vector left, Vector right); + + + /// MinNumberAcross : Minimum number reduction to scalar + + /// float32_t svminnmv[_f32](svbool_t pg, svfloat32_t op) : "FMINNMV Sresult, Pg, Zop.S" + public static unsafe Vector 
MinNumberAcross(Vector value); + + /// float64_t svminnmv[_f64](svbool_t pg, svfloat64_t op) : "FMINNMV Dresult, Pg, Zop.D" + public static unsafe Vector MinNumberAcross(Vector value); + + + /// Multiply : Multiply + + /// svfloat32_t svmul[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMUL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svmul[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "FMUL Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "FMUL Zresult.S, Zop1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMUL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svmul[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMUL Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMUL Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Multiply(Vector left, Vector right); + + /// svfloat64_t svmul[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMUL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svmul[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "FMUL Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "FMUL Zresult.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMUL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svmul[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMUL Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMUL Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Multiply(Vector left, Vector right); + + /// svint8_t svmul[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "MUL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; MUL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svmul[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "MUL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MUL Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; MUL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svmul[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; MUL Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; MUL Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector Multiply(Vector left, Vector right); + + /// svint16_t svmul[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "MUL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; MUL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svmul[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "MUL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MUL Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; MUL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svmul[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; MUL Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; MUL Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Multiply(Vector left, Vector right); + + /// svint32_t svmul[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "MUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; MUL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svmul[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "MUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MUL Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; MUL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t 
svmul[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; MUL Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; MUL Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Multiply(Vector left, Vector right); + + /// svint64_t svmul[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "MUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; MUL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svmul[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "MUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MUL Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; MUL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svmul[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; MUL Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; MUL Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Multiply(Vector left, Vector right); + + /// svuint8_t svmul[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MUL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; MUL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svmul[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MUL Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MUL Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; MUL Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svmul[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; MUL Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; MUL Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector Multiply(Vector left, Vector right); + + /// svuint16_t svmul[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MUL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; MUL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svmul[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MUL Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MUL Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; MUL Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svuint16_t svmul[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; MUL Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; MUL Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Multiply(Vector left, Vector right); + + /// svuint32_t svmul[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; MUL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svmul[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MUL Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; MUL Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svmul[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; MUL Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; MUL Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Multiply(Vector left, Vector right); + + /// svuint64_t svmul[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; MUL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svmul[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MUL Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; MUL Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svmul[_u64]_z(svbool_t pg, svuint64_t 
op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; MUL Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; MUL Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Multiply(Vector left, Vector right); + + + /// MultiplyAdd : Multiply-add, addend first + + /// svint8_t svmla[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3) : "MLA Ztied1.B, Pg/M, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; MLA Zresult.B, Pg/M, Zop2.B, Zop3.B" + /// svint8_t svmla[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3) : "MLA Ztied1.B, Pg/M, Zop2.B, Zop3.B" or "MAD Ztied2.B, Pg/M, Zop3.B, Zop1.B" or "MAD Ztied3.B, Pg/M, Zop2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; MLA Zresult.B, Pg/M, Zop2.B, Zop3.B" + /// svint8_t svmla[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; MLA Zresult.B, Pg/M, Zop2.B, Zop3.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; MAD Zresult.B, Pg/M, Zop3.B, Zop1.B" or "MOVPRFX Zresult.B, Pg/Z, Zop3.B; MAD Zresult.B, Pg/M, Zop2.B, Zop1.B" + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right); + + /// svint16_t svmla[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3) : "MLA Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; MLA Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svint16_t svmla[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3) : "MLA Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "MAD Ztied2.H, Pg/M, Zop3.H, Zop1.H" or "MAD Ztied3.H, Pg/M, Zop2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; MLA Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svint16_t svmla[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; MLA Zresult.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; MAD Zresult.H, Pg/M, Zop3.H, Zop1.H" or "MOVPRFX Zresult.H, Pg/Z, Zop3.H; MAD Zresult.H, Pg/M, Zop2.H, Zop1.H" + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right); + + /// svint32_t svmla[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) : "MLA Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; MLA Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svint32_t svmla[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) : "MLA Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "MAD Ztied2.S, Pg/M, Zop3.S, Zop1.S" or "MAD Ztied3.S, Pg/M, Zop2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; MLA Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svint32_t svmla[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; MLA Zresult.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; MAD Zresult.S, Pg/M, Zop3.S, Zop1.S" or "MOVPRFX Zresult.S, Pg/Z, Zop3.S; MAD Zresult.S, Pg/M, Zop2.S, Zop1.S" + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right); + + /// svint64_t svmla[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) : "MLA Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; MLA Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svint64_t svmla[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) : "MLA Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "MAD Ztied2.D, Pg/M, Zop3.D, Zop1.D" or "MAD Ztied3.D, Pg/M, Zop2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; MLA Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svint64_t svmla[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; MLA Zresult.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; MAD Zresult.D, Pg/M, Zop3.D, Zop1.D" or 
"MOVPRFX Zresult.D, Pg/Z, Zop3.D; MAD Zresult.D, Pg/M, Zop2.D, Zop1.D" + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right); + + /// svuint8_t svmla[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) : "MLA Ztied1.B, Pg/M, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; MLA Zresult.B, Pg/M, Zop2.B, Zop3.B" + /// svuint8_t svmla[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) : "MLA Ztied1.B, Pg/M, Zop2.B, Zop3.B" or "MAD Ztied2.B, Pg/M, Zop3.B, Zop1.B" or "MAD Ztied3.B, Pg/M, Zop2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; MLA Zresult.B, Pg/M, Zop2.B, Zop3.B" + /// svuint8_t svmla[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; MLA Zresult.B, Pg/M, Zop2.B, Zop3.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; MAD Zresult.B, Pg/M, Zop3.B, Zop1.B" or "MOVPRFX Zresult.B, Pg/Z, Zop3.B; MAD Zresult.B, Pg/M, Zop2.B, Zop1.B" + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right); + + /// svuint16_t svmla[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) : "MLA Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; MLA Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svuint16_t svmla[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) : "MLA Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "MAD Ztied2.H, Pg/M, Zop3.H, Zop1.H" or "MAD Ztied3.H, Pg/M, Zop2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; MLA Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svuint16_t svmla[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; MLA Zresult.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; MAD Zresult.H, Pg/M, Zop3.H, Zop1.H" or "MOVPRFX Zresult.H, Pg/Z, Zop3.H; MAD Zresult.H, Pg/M, Zop2.H, Zop1.H" + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right); + + /// svuint32_t svmla[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) : "MLA Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; MLA Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svuint32_t svmla[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) : "MLA Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "MAD Ztied2.S, Pg/M, Zop3.S, Zop1.S" or "MAD Ztied3.S, Pg/M, Zop2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; MLA Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svuint32_t svmla[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; MLA Zresult.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; MAD Zresult.S, Pg/M, Zop3.S, Zop1.S" or "MOVPRFX Zresult.S, Pg/Z, Zop3.S; MAD Zresult.S, Pg/M, Zop2.S, Zop1.S" + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right); + + /// svuint64_t svmla[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) : "MLA Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; MLA Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svuint64_t svmla[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) : "MLA Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "MAD Ztied2.D, Pg/M, Zop3.D, Zop1.D" or "MAD Ztied3.D, Pg/M, Zop2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; MLA Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svuint64_t svmla[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; MLA Zresult.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; MAD Zresult.D, Pg/M, Zop3.D, Zop1.D" or "MOVPRFX Zresult.D, Pg/Z, Zop3.D; MAD Zresult.D, Pg/M, Zop2.D, Zop1.D" + public static 
unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right); + + + /// MultiplyBySelectedScalar : Multiply by selected lane + + /// svfloat32_t svmul_lane[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm_index) : "FMUL Zresult.S, Zop1.S, Zop2.S[imm_index]" + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// svfloat64_t svmul_lane[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm_index) : "FMUL Zresult.D, Zop1.D, Zop2.D[imm_index]" + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); + + + /// MultiplyExtended : Multiply extended (∞×0=2) + + /// svfloat32_t svmulx[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMULX Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FMULX Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svmulx[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FMULX Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "FMULX Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; FMULX Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svmulx[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMULX Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMULX Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector MultiplyExtended(Vector left, Vector right); + + /// svfloat64_t svmulx[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMULX Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FMULX Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svmulx[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FMULX Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "FMULX Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; FMULX Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svmulx[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMULX Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMULX Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector MultiplyExtended(Vector left, Vector right); + + + /// MultiplySubtract : Multiply-subtract, minuend first + + /// svint8_t svmls[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3) : "MLS Ztied1.B, Pg/M, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; MLS Zresult.B, Pg/M, Zop2.B, Zop3.B" + /// svint8_t svmls[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3) : "MLS Ztied1.B, Pg/M, Zop2.B, Zop3.B" or "MSB Ztied2.B, Pg/M, Zop3.B, Zop1.B" or "MSB Ztied3.B, Pg/M, Zop2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; MLS Zresult.B, Pg/M, Zop2.B, Zop3.B" + /// svint8_t svmls[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; MLS Zresult.B, Pg/M, Zop2.B, Zop3.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; MSB Zresult.B, Pg/M, Zop3.B, Zop1.B" or "MOVPRFX Zresult.B, Pg/Z, Zop3.B; MSB Zresult.B, Pg/M, Zop2.B, Zop1.B" + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right); + + /// svint16_t svmls[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3) : "MLS Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; MLS Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svint16_t svmls[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3) : "MLS Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "MSB Ztied2.H, Pg/M, Zop3.H, Zop1.H" or "MSB Ztied3.H, Pg/M, Zop2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; MLS Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// 
svint16_t svmls[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; MLS Zresult.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; MSB Zresult.H, Pg/M, Zop3.H, Zop1.H" or "MOVPRFX Zresult.H, Pg/Z, Zop3.H; MSB Zresult.H, Pg/M, Zop2.H, Zop1.H" + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right); + + /// svint32_t svmls[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) : "MLS Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; MLS Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svint32_t svmls[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) : "MLS Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "MSB Ztied2.S, Pg/M, Zop3.S, Zop1.S" or "MSB Ztied3.S, Pg/M, Zop2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; MLS Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svint32_t svmls[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; MLS Zresult.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; MSB Zresult.S, Pg/M, Zop3.S, Zop1.S" or "MOVPRFX Zresult.S, Pg/Z, Zop3.S; MSB Zresult.S, Pg/M, Zop2.S, Zop1.S" + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right); + + /// svint64_t svmls[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) : "MLS Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; MLS Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svint64_t svmls[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) : "MLS Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "MSB Ztied2.D, Pg/M, Zop3.D, Zop1.D" or "MSB Ztied3.D, Pg/M, Zop2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; MLS Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svint64_t svmls[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; MLS Zresult.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; MSB Zresult.D, Pg/M, Zop3.D, Zop1.D" or "MOVPRFX Zresult.D, Pg/Z, Zop3.D; MSB Zresult.D, Pg/M, Zop2.D, Zop1.D" + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right); + + /// svuint8_t svmls[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) : "MLS Ztied1.B, Pg/M, Zop2.B, Zop3.B" or "MOVPRFX Zresult, Zop1; MLS Zresult.B, Pg/M, Zop2.B, Zop3.B" + /// svuint8_t svmls[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) : "MLS Ztied1.B, Pg/M, Zop2.B, Zop3.B" or "MSB Ztied2.B, Pg/M, Zop3.B, Zop1.B" or "MSB Ztied3.B, Pg/M, Zop2.B, Zop1.B" or "MOVPRFX Zresult, Zop1; MLS Zresult.B, Pg/M, Zop2.B, Zop3.B" + /// svuint8_t svmls[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; MLS Zresult.B, Pg/M, Zop2.B, Zop3.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; MSB Zresult.B, Pg/M, Zop3.B, Zop1.B" or "MOVPRFX Zresult.B, Pg/Z, Zop3.B; MSB Zresult.B, Pg/M, Zop2.B, Zop1.B" + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right); + + /// svuint16_t svmls[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) : "MLS Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult, Zop1; MLS Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svuint16_t svmls[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) : "MLS Ztied1.H, Pg/M, Zop2.H, Zop3.H" or "MSB Ztied2.H, Pg/M, Zop3.H, Zop1.H" or "MSB Ztied3.H, Pg/M, Zop2.H, Zop1.H" or "MOVPRFX Zresult, Zop1; MLS Zresult.H, Pg/M, Zop2.H, Zop3.H" + /// svuint16_t svmls[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) : 
"MOVPRFX Zresult.H, Pg/Z, Zop1.H; MLS Zresult.H, Pg/M, Zop2.H, Zop3.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; MSB Zresult.H, Pg/M, Zop3.H, Zop1.H" or "MOVPRFX Zresult.H, Pg/Z, Zop3.H; MSB Zresult.H, Pg/M, Zop2.H, Zop1.H" + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right); + + /// svuint32_t svmls[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) : "MLS Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult, Zop1; MLS Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svuint32_t svmls[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) : "MLS Ztied1.S, Pg/M, Zop2.S, Zop3.S" or "MSB Ztied2.S, Pg/M, Zop3.S, Zop1.S" or "MSB Ztied3.S, Pg/M, Zop2.S, Zop1.S" or "MOVPRFX Zresult, Zop1; MLS Zresult.S, Pg/M, Zop2.S, Zop3.S" + /// svuint32_t svmls[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; MLS Zresult.S, Pg/M, Zop2.S, Zop3.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; MSB Zresult.S, Pg/M, Zop3.S, Zop1.S" or "MOVPRFX Zresult.S, Pg/Z, Zop3.S; MSB Zresult.S, Pg/M, Zop2.S, Zop1.S" + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right); + + /// svuint64_t svmls[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) : "MLS Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult, Zop1; MLS Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svuint64_t svmls[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) : "MLS Ztied1.D, Pg/M, Zop2.D, Zop3.D" or "MSB Ztied2.D, Pg/M, Zop3.D, Zop1.D" or "MSB Ztied3.D, Pg/M, Zop2.D, Zop1.D" or "MOVPRFX Zresult, Zop1; MLS Zresult.D, Pg/M, Zop2.D, Zop3.D" + /// svuint64_t svmls[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; MLS Zresult.D, Pg/M, Zop2.D, Zop3.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; MSB Zresult.D, Pg/M, Zop3.D, Zop1.D" or "MOVPRFX Zresult.D, Pg/Z, Zop3.D; MSB Zresult.D, Pg/M, Zop2.D, Zop1.D" + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right); + + + /// Negate : Negate + + /// svfloat32_t svneg[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) : "FNEG Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; FNEG Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svneg[_f32]_x(svbool_t pg, svfloat32_t op) : "FNEG Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; FNEG Zresult.S, Pg/M, Zop.S" + /// svfloat32_t svneg[_f32]_z(svbool_t pg, svfloat32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; FNEG Zresult.S, Pg/M, Zop.S" + public static unsafe Vector Negate(Vector value); + + /// svfloat64_t svneg[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) : "FNEG Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; FNEG Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svneg[_f64]_x(svbool_t pg, svfloat64_t op) : "FNEG Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; FNEG Zresult.D, Pg/M, Zop.D" + /// svfloat64_t svneg[_f64]_z(svbool_t pg, svfloat64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; FNEG Zresult.D, Pg/M, Zop.D" + public static unsafe Vector Negate(Vector value); + + /// svint8_t svneg[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) : "NEG Ztied.B, Pg/M, Zop.B" or "MOVPRFX Zresult, Zinactive; NEG Zresult.B, Pg/M, Zop.B" + /// svint8_t svneg[_s8]_x(svbool_t pg, svint8_t op) : "NEG Ztied.B, Pg/M, Ztied.B" or "MOVPRFX Zresult, Zop; NEG Zresult.B, Pg/M, Zop.B" + /// svint8_t svneg[_s8]_z(svbool_t pg, svint8_t op) : "MOVPRFX Zresult.B, Pg/Z, Zop.B; NEG Zresult.B, Pg/M, Zop.B" + public static unsafe Vector Negate(Vector 
value); + + /// svint16_t svneg[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) : "NEG Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; NEG Zresult.H, Pg/M, Zop.H" + /// svint16_t svneg[_s16]_x(svbool_t pg, svint16_t op) : "NEG Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; NEG Zresult.H, Pg/M, Zop.H" + /// svint16_t svneg[_s16]_z(svbool_t pg, svint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; NEG Zresult.H, Pg/M, Zop.H" + public static unsafe Vector Negate(Vector value); + + /// svint32_t svneg[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) : "NEG Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; NEG Zresult.S, Pg/M, Zop.S" + /// svint32_t svneg[_s32]_x(svbool_t pg, svint32_t op) : "NEG Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; NEG Zresult.S, Pg/M, Zop.S" + /// svint32_t svneg[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; NEG Zresult.S, Pg/M, Zop.S" + public static unsafe Vector Negate(Vector value); + + /// svint64_t svneg[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) : "NEG Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; NEG Zresult.D, Pg/M, Zop.D" + /// svint64_t svneg[_s64]_x(svbool_t pg, svint64_t op) : "NEG Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; NEG Zresult.D, Pg/M, Zop.D" + /// svint64_t svneg[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; NEG Zresult.D, Pg/M, Zop.D" + public static unsafe Vector Negate(Vector value); + + + /// SignExtend16 : Sign-extend the low 16 bits + + /// svint32_t svexth[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) : "SXTH Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; SXTH Zresult.S, Pg/M, Zop.S" + /// svint32_t svexth[_s32]_x(svbool_t pg, svint32_t op) : "SXTH Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; SXTH Zresult.S, Pg/M, Zop.S" + /// svint32_t svexth[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; SXTH Zresult.S, Pg/M, Zop.S" + public static unsafe Vector SignExtend16(Vector value); + + /// svint64_t svexth[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) : "SXTH Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; SXTH Zresult.D, Pg/M, Zop.D" + /// svint64_t svexth[_s64]_x(svbool_t pg, svint64_t op) : "SXTH Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; SXTH Zresult.D, Pg/M, Zop.D" + /// svint64_t svexth[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; SXTH Zresult.D, Pg/M, Zop.D" + public static unsafe Vector SignExtend16(Vector value); + + + /// SignExtend32 : Sign-extend the low 32 bits + + /// svint64_t svextw[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) : "SXTW Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; SXTW Zresult.D, Pg/M, Zop.D" + /// svint64_t svextw[_s64]_x(svbool_t pg, svint64_t op) : "SXTW Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; SXTW Zresult.D, Pg/M, Zop.D" + /// svint64_t svextw[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; SXTW Zresult.D, Pg/M, Zop.D" + public static unsafe Vector SignExtend32(Vector value); + + + /// SignExtend8 : Sign-extend the low 8 bits + + /// svint16_t svextb[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) : "SXTB Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; SXTB Zresult.H, Pg/M, Zop.H" + /// svint16_t svextb[_s16]_x(svbool_t pg, svint16_t op) : "SXTB Ztied.H, Pg/M, Ztied.H" or "MOVPRFX Zresult, Zop; SXTB Zresult.H, Pg/M, Zop.H" + /// svint16_t svextb[_s16]_z(svbool_t pg, svint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; SXTB Zresult.H, Pg/M, Zop.H" + public static 
unsafe Vector SignExtend8(Vector value); + + /// svint32_t svextb[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) : "SXTB Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; SXTB Zresult.S, Pg/M, Zop.S" + /// svint32_t svextb[_s32]_x(svbool_t pg, svint32_t op) : "SXTB Ztied.S, Pg/M, Ztied.S" or "MOVPRFX Zresult, Zop; SXTB Zresult.S, Pg/M, Zop.S" + /// svint32_t svextb[_s32]_z(svbool_t pg, svint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; SXTB Zresult.S, Pg/M, Zop.S" + public static unsafe Vector SignExtend8(Vector value); + + /// svint64_t svextb[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) : "SXTB Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; SXTB Zresult.D, Pg/M, Zop.D" + /// svint64_t svextb[_s64]_x(svbool_t pg, svint64_t op) : "SXTB Ztied.D, Pg/M, Ztied.D" or "MOVPRFX Zresult, Zop; SXTB Zresult.D, Pg/M, Zop.D" + /// svint64_t svextb[_s64]_z(svbool_t pg, svint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; SXTB Zresult.D, Pg/M, Zop.D" + public static unsafe Vector SignExtend8(Vector value); + + + /// SignExtendWideningLower : Unpack and extend low half + + /// svint16_t svunpklo[_s16](svint8_t op) : "SUNPKLO Zresult.H, Zop.B" + public static unsafe Vector SignExtendWideningLower(Vector value); + + /// svint32_t svunpklo[_s32](svint16_t op) : "SUNPKLO Zresult.S, Zop.H" + public static unsafe Vector SignExtendWideningLower(Vector value); + + /// svint64_t svunpklo[_s64](svint32_t op) : "SUNPKLO Zresult.D, Zop.S" + public static unsafe Vector SignExtendWideningLower(Vector value); + + + /// SignExtendWideningUpper : Unpack and extend high half + + /// svint16_t svunpkhi[_s16](svint8_t op) : "SUNPKHI Zresult.H, Zop.B" + public static unsafe Vector SignExtendWideningUpper(Vector value); + + /// svint32_t svunpkhi[_s32](svint16_t op) : "SUNPKHI Zresult.S, Zop.H" + public static unsafe Vector SignExtendWideningUpper(Vector value); + + /// svint64_t svunpkhi[_s64](svint32_t op) : "SUNPKHI Zresult.D, Zop.S" + public static unsafe Vector SignExtendWideningUpper(Vector value); + + + /// Subtract : Subtract + + /// svfloat32_t svsub[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FSUB Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svsub[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "FSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "FSUBR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "FSUB Zresult.S, Zop1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; FSUB Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svfloat32_t svsub[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; FSUB Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; FSUBR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Subtract(Vector left, Vector right); + + /// svfloat64_t svsub[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FSUB Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svsub[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "FSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "FSUBR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "FSUB Zresult.D, Zop1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; FSUB Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svfloat64_t svsub[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; FSUB Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; FSUBR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector 
Subtract(Vector left, Vector right); + + /// svint8_t svsub[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) : "SUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SUB Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svint8_t svsub[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) : "SUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "SUBR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "SUB Zresult.B, Zop1.B, Zop2.B" + /// svint8_t svsub[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SUB Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; SUBR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector Subtract(Vector left, Vector right); + + /// svint16_t svsub[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) : "SUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SUB Zresult.H, Pg/M, Zresult.H, Zop2.H" + /// svint16_t svsub[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) : "SUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "SUBR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "SUB Zresult.H, Zop1.H, Zop2.H" + /// svint16_t svsub[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SUB Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; SUBR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Subtract(Vector left, Vector right); + + /// svint32_t svsub[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) : "SUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SUB Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svint32_t svsub[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) : "SUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SUBR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "SUB Zresult.S, Zop1.S, Zop2.S" + /// svint32_t svsub[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SUB Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SUBR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Subtract(Vector left, Vector right); + + /// svint64_t svsub[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) : "SUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SUB Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svint64_t svsub[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) : "SUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SUBR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "SUB Zresult.D, Zop1.D, Zop2.D" + /// svint64_t svsub[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SUB Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SUBR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Subtract(Vector left, Vector right); + + /// svuint8_t svsub[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) : "SUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "MOVPRFX Zresult, Zop1; SUB Zresult.B, Pg/M, Zresult.B, Zop2.B" + /// svuint8_t svsub[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) : "SUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B" or "SUBR Ztied2.B, Pg/M, Ztied2.B, Zop1.B" or "SUB Zresult.B, Zop1.B, Zop2.B" + /// svuint8_t svsub[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) : "MOVPRFX Zresult.B, Pg/Z, Zop1.B; SUB Zresult.B, Pg/M, Zresult.B, Zop2.B" or "MOVPRFX Zresult.B, Pg/Z, Zop2.B; SUBR Zresult.B, Pg/M, Zresult.B, Zop1.B" + public static unsafe Vector Subtract(Vector left, Vector right); + + /// svuint16_t svsub[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) : "SUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "MOVPRFX Zresult, Zop1; SUB Zresult.H, Pg/M, Zresult.H, Zop2.H" + 
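// Illustrative aside, not part of the generated listing: the _m, _x and _z intrinsics quoted
+ // in these comments are the merging, don't-care and zeroing predicated forms of one operation,
+ // and each group maps onto the single managed overload that follows it; the JIT is free to pick
+ // among the quoted SUB/SUBR/MOVPRFX encodings. A minimal usage sketch, assuming hypothetical
+ // Vector<ushort> inputs 'left' and 'right' and the Sve.IsSupported guard pattern:
+ //     if (Sve.IsSupported)
+ //     {
+ //         Vector<ushort> diff = Sve.Subtract(left, right);   // one of the SUB Z.H forms above
+ //     }
+ // End of aside. +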
/// svuint16_t svsub[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) : "SUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H" or "SUBR Ztied2.H, Pg/M, Ztied2.H, Zop1.H" or "SUB Zresult.H, Zop1.H, Zop2.H" + /// svuint16_t svsub[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) : "MOVPRFX Zresult.H, Pg/Z, Zop1.H; SUB Zresult.H, Pg/M, Zresult.H, Zop2.H" or "MOVPRFX Zresult.H, Pg/Z, Zop2.H; SUBR Zresult.H, Pg/M, Zresult.H, Zop1.H" + public static unsafe Vector Subtract(Vector left, Vector right); + + /// svuint32_t svsub[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) : "SUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "MOVPRFX Zresult, Zop1; SUB Zresult.S, Pg/M, Zresult.S, Zop2.S" + /// svuint32_t svsub[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) : "SUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S" or "SUBR Ztied2.S, Pg/M, Ztied2.S, Zop1.S" or "SUB Zresult.S, Zop1.S, Zop2.S" + /// svuint32_t svsub[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) : "MOVPRFX Zresult.S, Pg/Z, Zop1.S; SUB Zresult.S, Pg/M, Zresult.S, Zop2.S" or "MOVPRFX Zresult.S, Pg/Z, Zop2.S; SUBR Zresult.S, Pg/M, Zresult.S, Zop1.S" + public static unsafe Vector Subtract(Vector left, Vector right); + + /// svuint64_t svsub[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) : "SUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "MOVPRFX Zresult, Zop1; SUB Zresult.D, Pg/M, Zresult.D, Zop2.D" + /// svuint64_t svsub[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) : "SUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D" or "SUBR Ztied2.D, Pg/M, Ztied2.D, Zop1.D" or "SUB Zresult.D, Zop1.D, Zop2.D" + /// svuint64_t svsub[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) : "MOVPRFX Zresult.D, Pg/Z, Zop1.D; SUB Zresult.D, Pg/M, Zresult.D, Zop2.D" or "MOVPRFX Zresult.D, Pg/Z, Zop2.D; SUBR Zresult.D, Pg/M, Zresult.D, Zop1.D" + public static unsafe Vector Subtract(Vector left, Vector right); + + + /// SubtractSaturate : Saturating subtract + + /// svint8_t svqsub[_s8](svint8_t op1, svint8_t op2) : "SQSUB Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// svint16_t svqsub[_s16](svint16_t op1, svint16_t op2) : "SQSUB Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// svint32_t svqsub[_s32](svint32_t op1, svint32_t op2) : "SQSUB Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// svint64_t svqsub[_s64](svint64_t op1, svint64_t op2) : "SQSUB Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// svuint8_t svqsub[_u8](svuint8_t op1, svuint8_t op2) : "UQSUB Zresult.B, Zop1.B, Zop2.B" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// svuint16_t svqsub[_u16](svuint16_t op1, svuint16_t op2) : "UQSUB Zresult.H, Zop1.H, Zop2.H" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// svuint32_t svqsub[_u32](svuint32_t op1, svuint32_t op2) : "UQSUB Zresult.S, Zop1.S, Zop2.S" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// svuint64_t svqsub[_u64](svuint64_t op1, svuint64_t op2) : "UQSUB Zresult.D, Zop1.D, Zop2.D" + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + + /// ZeroExtend16 : Zero-extend the low 16 bits + + /// svuint32_t svexth[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) : "UXTH Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; UXTH Zresult.S, Pg/M, Zop.S" + /// svuint32_t svexth[_u32]_x(svbool_t pg, 
svuint32_t op) : "UXTH Ztied.S, Pg/M, Ztied.S" or "AND Ztied.S, Ztied.S, #65535" + /// svuint32_t svexth[_u32]_z(svbool_t pg, svuint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; UXTH Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ZeroExtend16(Vector value); + + /// svuint64_t svexth[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) : "UXTH Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; UXTH Zresult.D, Pg/M, Zop.D" + /// svuint64_t svexth[_u64]_x(svbool_t pg, svuint64_t op) : "UXTH Ztied.D, Pg/M, Ztied.D" or "AND Ztied.D, Ztied.D, #65535" + /// svuint64_t svexth[_u64]_z(svbool_t pg, svuint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; UXTH Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ZeroExtend16(Vector value); + + + /// ZeroExtend32 : Zero-extend the low 32 bits + + /// svuint64_t svextw[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) : "UXTW Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; UXTW Zresult.D, Pg/M, Zop.D" + /// svuint64_t svextw[_u64]_x(svbool_t pg, svuint64_t op) : "UXTW Ztied.D, Pg/M, Ztied.D" or "AND Ztied.D, Ztied.D, #4294967295" + /// svuint64_t svextw[_u64]_z(svbool_t pg, svuint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; UXTW Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ZeroExtend32(Vector value); + + + /// ZeroExtend8 : Zero-extend the low 8 bits + + /// svuint16_t svextb[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) : "UXTB Ztied.H, Pg/M, Zop.H" or "MOVPRFX Zresult, Zinactive; UXTB Zresult.H, Pg/M, Zop.H" + /// svuint16_t svextb[_u16]_x(svbool_t pg, svuint16_t op) : "UXTB Ztied.H, Pg/M, Ztied.H" or "AND Ztied.H, Ztied.H, #255" + /// svuint16_t svextb[_u16]_z(svbool_t pg, svuint16_t op) : "MOVPRFX Zresult.H, Pg/Z, Zop.H; UXTB Zresult.H, Pg/M, Zop.H" + public static unsafe Vector ZeroExtend8(Vector value); + + /// svuint32_t svextb[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) : "UXTB Ztied.S, Pg/M, Zop.S" or "MOVPRFX Zresult, Zinactive; UXTB Zresult.S, Pg/M, Zop.S" + /// svuint32_t svextb[_u32]_x(svbool_t pg, svuint32_t op) : "UXTB Ztied.S, Pg/M, Ztied.S" or "AND Ztied.S, Ztied.S, #255" + /// svuint32_t svextb[_u32]_z(svbool_t pg, svuint32_t op) : "MOVPRFX Zresult.S, Pg/Z, Zop.S; UXTB Zresult.S, Pg/M, Zop.S" + public static unsafe Vector ZeroExtend8(Vector value); + + /// svuint64_t svextb[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) : "UXTB Ztied.D, Pg/M, Zop.D" or "MOVPRFX Zresult, Zinactive; UXTB Zresult.D, Pg/M, Zop.D" + /// svuint64_t svextb[_u64]_x(svbool_t pg, svuint64_t op) : "UXTB Ztied.D, Pg/M, Ztied.D" or "AND Ztied.D, Ztied.D, #255" + /// svuint64_t svextb[_u64]_z(svbool_t pg, svuint64_t op) : "MOVPRFX Zresult.D, Pg/Z, Zop.D; UXTB Zresult.D, Pg/M, Zop.D" + public static unsafe Vector ZeroExtend8(Vector value); + + + /// ZeroExtendWideningLower : Unpack and extend low half + + /// svuint16_t svunpklo[_u16](svuint8_t op) : "UUNPKLO Zresult.H, Zop.B" + /// svbool_t svunpklo[_b](svbool_t op) : "PUNPKLO Presult.H, Pop.B" + public static unsafe Vector ZeroExtendWideningLower(Vector value); + + /// svuint32_t svunpklo[_u32](svuint16_t op) : "UUNPKLO Zresult.S, Zop.H" + /// svbool_t svunpklo[_b](svbool_t op) : "PUNPKLO Presult.H, Pop.B" + public static unsafe Vector ZeroExtendWideningLower(Vector value); + + /// svuint64_t svunpklo[_u64](svuint32_t op) : "UUNPKLO Zresult.D, Zop.S" + /// svbool_t svunpklo[_b](svbool_t op) : "PUNPKLO Presult.H, Pop.B" + public static unsafe Vector ZeroExtendWideningLower(Vector value); + + + /// ZeroExtendWideningUpper : Unpack and extend high half + + 
/// svuint16_t svunpkhi[_u16](svuint8_t op) : "UUNPKHI Zresult.H, Zop.B" + /// svbool_t svunpkhi[_b](svbool_t op) : "PUNPKHI Presult.H, Pop.B" + public static unsafe Vector ZeroExtendWideningUpper(Vector value); + + /// svuint32_t svunpkhi[_u32](svuint16_t op) : "UUNPKHI Zresult.S, Zop.H" + /// svbool_t svunpkhi[_b](svbool_t op) : "PUNPKHI Presult.H, Pop.B" + public static unsafe Vector ZeroExtendWideningUpper(Vector value); + + /// svuint64_t svunpkhi[_u64](svuint32_t op) : "UUNPKHI Zresult.D, Zop.S" + /// svbool_t svunpkhi[_b](svbool_t op) : "PUNPKHI Presult.H, Pop.B" + public static unsafe Vector ZeroExtendWideningUpper(Vector value); + + + /// total method signatures: 196 + /// total method names: 49 +} + + /// Optional Entries: + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, Vector right); // svmulh[_s8]_m or svmulh[_s8]_x or svmulh[_s8]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, Vector right); // svmulh[_s16]_m or svmulh[_s16]_x or svmulh[_s16]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, Vector right); // svmulh[_s32]_m or svmulh[_s32]_x or svmulh[_s32]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, Vector right); // svmulh[_s64]_m or svmulh[_s64]_x or svmulh[_s64]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, Vector right); // svmulh[_u8]_m or svmulh[_u8]_x or svmulh[_u8]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, Vector right); // svmulh[_u16]_m or svmulh[_u16]_x or svmulh[_u16]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, Vector right); // svmulh[_u32]_m or svmulh[_u32]_x or svmulh[_u32]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, Vector right); // svmulh[_u64]_m or svmulh[_u64]_x or svmulh[_u64]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, sbyte right); // svmulh[_n_s8]_m or svmulh[_n_s8]_x or svmulh[_n_s8]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, short right); // svmulh[_n_s16]_m or svmulh[_n_s16]_x or svmulh[_n_s16]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, int right); // svmulh[_n_s32]_m or svmulh[_n_s32]_x or svmulh[_n_s32]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, long right); // svmulh[_n_s64]_m or svmulh[_n_s64]_x or svmulh[_n_s64]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, byte right); // svmulh[_n_u8]_m or svmulh[_n_u8]_x or svmulh[_n_u8]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, ushort right); // svmulh[_n_u16]_m or svmulh[_n_u16]_x or svmulh[_n_u16]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, uint right); // svmulh[_n_u32]_m or svmulh[_n_u32]_x or svmulh[_n_u32]_z + /// public static unsafe Vector MultiplyReturningHighHalf(Vector left, ulong right); // svmulh[_n_u64]_m or svmulh[_n_u64]_x or svmulh[_n_u64]_z + /// Total Maybe: 16 + + /// Rejected: + /// public static unsafe Vector AbsoluteDifference(Vector left, float right); // svabd[_n_f32]_m or svabd[_n_f32]_x or svabd[_n_f32]_z + /// public static unsafe Vector AbsoluteDifference(Vector left, double right); // svabd[_n_f64]_m or svabd[_n_f64]_x or svabd[_n_f64]_z + /// public static unsafe Vector AbsoluteDifference(Vector left, sbyte right); // svabd[_n_s8]_m or svabd[_n_s8]_x or svabd[_n_s8]_z + /// public static unsafe Vector AbsoluteDifference(Vector left, 
short right); // svabd[_n_s16]_m or svabd[_n_s16]_x or svabd[_n_s16]_z + /// public static unsafe Vector AbsoluteDifference(Vector left, int right); // svabd[_n_s32]_m or svabd[_n_s32]_x or svabd[_n_s32]_z + /// public static unsafe Vector AbsoluteDifference(Vector left, long right); // svabd[_n_s64]_m or svabd[_n_s64]_x or svabd[_n_s64]_z + /// public static unsafe Vector AbsoluteDifference(Vector left, byte right); // svabd[_n_u8]_m or svabd[_n_u8]_x or svabd[_n_u8]_z + /// public static unsafe Vector AbsoluteDifference(Vector left, ushort right); // svabd[_n_u16]_m or svabd[_n_u16]_x or svabd[_n_u16]_z + /// public static unsafe Vector AbsoluteDifference(Vector left, uint right); // svabd[_n_u32]_m or svabd[_n_u32]_x or svabd[_n_u32]_z + /// public static unsafe Vector AbsoluteDifference(Vector left, ulong right); // svabd[_n_u64]_m or svabd[_n_u64]_x or svabd[_n_u64]_z + /// public static unsafe Vector Add(Vector left, float right); // svadd[_n_f32]_m or svadd[_n_f32]_x or svadd[_n_f32]_z + /// public static unsafe Vector Add(Vector left, double right); // svadd[_n_f64]_m or svadd[_n_f64]_x or svadd[_n_f64]_z + /// public static unsafe Vector Add(Vector left, sbyte right); // svadd[_n_s8]_m or svadd[_n_s8]_x or svadd[_n_s8]_z + /// public static unsafe Vector Add(Vector left, short right); // svadd[_n_s16]_m or svadd[_n_s16]_x or svadd[_n_s16]_z + /// public static unsafe Vector Add(Vector left, int right); // svadd[_n_s32]_m or svadd[_n_s32]_x or svadd[_n_s32]_z + /// public static unsafe Vector Add(Vector left, long right); // svadd[_n_s64]_m or svadd[_n_s64]_x or svadd[_n_s64]_z + /// public static unsafe Vector Add(Vector left, byte right); // svadd[_n_u8]_m or svadd[_n_u8]_x or svadd[_n_u8]_z + /// public static unsafe Vector Add(Vector left, ushort right); // svadd[_n_u16]_m or svadd[_n_u16]_x or svadd[_n_u16]_z + /// public static unsafe Vector Add(Vector left, uint right); // svadd[_n_u32]_m or svadd[_n_u32]_x or svadd[_n_u32]_z + /// public static unsafe Vector Add(Vector left, ulong right); // svadd[_n_u64]_m or svadd[_n_u64]_x or svadd[_n_u64]_z + /// public static unsafe Vector AddSaturate(Vector left, sbyte right); // svqadd[_n_s8] + /// public static unsafe Vector AddSaturate(Vector left, short right); // svqadd[_n_s16] + /// public static unsafe Vector AddSaturate(Vector left, int right); // svqadd[_n_s32] + /// public static unsafe Vector AddSaturate(Vector left, long right); // svqadd[_n_s64] + /// public static unsafe Vector AddSaturate(Vector left, byte right); // svqadd[_n_u8] + /// public static unsafe Vector AddSaturate(Vector left, ushort right); // svqadd[_n_u16] + /// public static unsafe Vector AddSaturate(Vector left, uint right); // svqadd[_n_u32] + /// public static unsafe Vector AddSaturate(Vector left, ulong right); // svqadd[_n_u64] + /// public static unsafe Vector Divide(Vector left, float right); // svdiv[_n_f32]_m or svdiv[_n_f32]_x or svdiv[_n_f32]_z + /// public static unsafe Vector Divide(Vector left, double right); // svdiv[_n_f64]_m or svdiv[_n_f64]_x or svdiv[_n_f64]_z + /// public static unsafe Vector Divide(Vector left, int right); // svdiv[_n_s32]_m or svdiv[_n_s32]_x or svdiv[_n_s32]_z + /// public static unsafe Vector Divide(Vector left, long right); // svdiv[_n_s64]_m or svdiv[_n_s64]_x or svdiv[_n_s64]_z + /// public static unsafe Vector Divide(Vector left, uint right); // svdiv[_n_u32]_m or svdiv[_n_u32]_x or svdiv[_n_u32]_z + /// public static unsafe Vector Divide(Vector left, ulong right); // svdiv[_n_u64]_m or svdiv[_n_u64]_x or 
svdiv[_n_u64]_z + /// public static unsafe Vector DivideReversed(Vector left, Vector right); // svdivr[_f32]_m or svdivr[_f32]_x or svdivr[_f32]_z + /// public static unsafe Vector DivideReversed(Vector left, Vector right); // svdivr[_f64]_m or svdivr[_f64]_x or svdivr[_f64]_z + /// public static unsafe Vector DivideReversed(Vector left, Vector right); // svdivr[_s32]_m or svdivr[_s32]_x or svdivr[_s32]_z + /// public static unsafe Vector DivideReversed(Vector left, Vector right); // svdivr[_s64]_m or svdivr[_s64]_x or svdivr[_s64]_z + /// public static unsafe Vector DivideReversed(Vector left, Vector right); // svdivr[_u32]_m or svdivr[_u32]_x or svdivr[_u32]_z + /// public static unsafe Vector DivideReversed(Vector left, Vector right); // svdivr[_u64]_m or svdivr[_u64]_x or svdivr[_u64]_z + /// public static unsafe Vector DivideReversed(Vector left, float right); // svdivr[_n_f32]_m or svdivr[_n_f32]_x or svdivr[_n_f32]_z + /// public static unsafe Vector DivideReversed(Vector left, double right); // svdivr[_n_f64]_m or svdivr[_n_f64]_x or svdivr[_n_f64]_z + /// public static unsafe Vector DivideReversed(Vector left, int right); // svdivr[_n_s32]_m or svdivr[_n_s32]_x or svdivr[_n_s32]_z + /// public static unsafe Vector DivideReversed(Vector left, long right); // svdivr[_n_s64]_m or svdivr[_n_s64]_x or svdivr[_n_s64]_z + /// public static unsafe Vector DivideReversed(Vector left, uint right); // svdivr[_n_u32]_m or svdivr[_n_u32]_x or svdivr[_n_u32]_z + /// public static unsafe Vector DivideReversed(Vector left, ulong right); // svdivr[_n_u64]_m or svdivr[_n_u64]_x or svdivr[_n_u64]_z + /// public static unsafe Vector DotProduct(Vector addend, Vector left, sbyte right); // svdot[_n_s32] + /// public static unsafe Vector DotProduct(Vector addend, Vector left, short right); // svdot[_n_s64] + /// public static unsafe Vector DotProduct(Vector addend, Vector left, byte right); // svdot[_n_u32] + /// public static unsafe Vector DotProduct(Vector addend, Vector left, ushort right); // svdot[_n_u64] + /// public static unsafe Vector Max(Vector left, float right); // svmax[_n_f32]_m or svmax[_n_f32]_x or svmax[_n_f32]_z + /// public static unsafe Vector Max(Vector left, double right); // svmax[_n_f64]_m or svmax[_n_f64]_x or svmax[_n_f64]_z + /// public static unsafe Vector Max(Vector left, sbyte right); // svmax[_n_s8]_m or svmax[_n_s8]_x or svmax[_n_s8]_z + /// public static unsafe Vector Max(Vector left, short right); // svmax[_n_s16]_m or svmax[_n_s16]_x or svmax[_n_s16]_z + /// public static unsafe Vector Max(Vector left, int right); // svmax[_n_s32]_m or svmax[_n_s32]_x or svmax[_n_s32]_z + /// public static unsafe Vector Max(Vector left, long right); // svmax[_n_s64]_m or svmax[_n_s64]_x or svmax[_n_s64]_z + /// public static unsafe Vector Max(Vector left, byte right); // svmax[_n_u8]_m or svmax[_n_u8]_x or svmax[_n_u8]_z + /// public static unsafe Vector Max(Vector left, ushort right); // svmax[_n_u16]_m or svmax[_n_u16]_x or svmax[_n_u16]_z + /// public static unsafe Vector Max(Vector left, uint right); // svmax[_n_u32]_m or svmax[_n_u32]_x or svmax[_n_u32]_z + /// public static unsafe Vector Max(Vector left, ulong right); // svmax[_n_u64]_m or svmax[_n_u64]_x or svmax[_n_u64]_z + /// public static unsafe Vector MaxNumber(Vector left, float right); // svmaxnm[_n_f32]_m or svmaxnm[_n_f32]_x or svmaxnm[_n_f32]_z + /// public static unsafe Vector MaxNumber(Vector left, double right); // svmaxnm[_n_f64]_m or svmaxnm[_n_f64]_x or svmaxnm[_n_f64]_z + /// public static unsafe Vector 
Min(Vector left, float right); // svmin[_n_f32]_m or svmin[_n_f32]_x or svmin[_n_f32]_z + /// public static unsafe Vector Min(Vector left, double right); // svmin[_n_f64]_m or svmin[_n_f64]_x or svmin[_n_f64]_z + /// public static unsafe Vector Min(Vector left, sbyte right); // svmin[_n_s8]_m or svmin[_n_s8]_x or svmin[_n_s8]_z + /// public static unsafe Vector Min(Vector left, short right); // svmin[_n_s16]_m or svmin[_n_s16]_x or svmin[_n_s16]_z + /// public static unsafe Vector Min(Vector left, int right); // svmin[_n_s32]_m or svmin[_n_s32]_x or svmin[_n_s32]_z + /// public static unsafe Vector Min(Vector left, long right); // svmin[_n_s64]_m or svmin[_n_s64]_x or svmin[_n_s64]_z + /// public static unsafe Vector Min(Vector left, byte right); // svmin[_n_u8]_m or svmin[_n_u8]_x or svmin[_n_u8]_z + /// public static unsafe Vector Min(Vector left, ushort right); // svmin[_n_u16]_m or svmin[_n_u16]_x or svmin[_n_u16]_z + /// public static unsafe Vector Min(Vector left, uint right); // svmin[_n_u32]_m or svmin[_n_u32]_x or svmin[_n_u32]_z + /// public static unsafe Vector Min(Vector left, ulong right); // svmin[_n_u64]_m or svmin[_n_u64]_x or svmin[_n_u64]_z + /// public static unsafe Vector MinNumber(Vector left, float right); // svminnm[_n_f32]_m or svminnm[_n_f32]_x or svminnm[_n_f32]_z + /// public static unsafe Vector MinNumber(Vector left, double right); // svminnm[_n_f64]_m or svminnm[_n_f64]_x or svminnm[_n_f64]_z + /// public static unsafe Vector Multiply(Vector left, float right); // svmul[_n_f32]_m or svmul[_n_f32]_x or svmul[_n_f32]_z + /// public static unsafe Vector Multiply(Vector left, double right); // svmul[_n_f64]_m or svmul[_n_f64]_x or svmul[_n_f64]_z + /// public static unsafe Vector Multiply(Vector left, sbyte right); // svmul[_n_s8]_m or svmul[_n_s8]_x or svmul[_n_s8]_z + /// public static unsafe Vector Multiply(Vector left, short right); // svmul[_n_s16]_m or svmul[_n_s16]_x or svmul[_n_s16]_z + /// public static unsafe Vector Multiply(Vector left, int right); // svmul[_n_s32]_m or svmul[_n_s32]_x or svmul[_n_s32]_z + /// public static unsafe Vector Multiply(Vector left, long right); // svmul[_n_s64]_m or svmul[_n_s64]_x or svmul[_n_s64]_z + /// public static unsafe Vector Multiply(Vector left, byte right); // svmul[_n_u8]_m or svmul[_n_u8]_x or svmul[_n_u8]_z + /// public static unsafe Vector Multiply(Vector left, ushort right); // svmul[_n_u16]_m or svmul[_n_u16]_x or svmul[_n_u16]_z + /// public static unsafe Vector Multiply(Vector left, uint right); // svmul[_n_u32]_m or svmul[_n_u32]_x or svmul[_n_u32]_z + /// public static unsafe Vector Multiply(Vector left, ulong right); // svmul[_n_u64]_m or svmul[_n_u64]_x or svmul[_n_u64]_z + /// public static unsafe Vector MultiplyAdd(Vector addend, Vector left, float right); // svmla[_n_f32]_m or svmla[_n_f32]_x or svmla[_n_f32]_z + /// public static unsafe Vector MultiplyAdd(Vector addend, Vector left, double right); // svmla[_n_f64]_m or svmla[_n_f64]_x or svmla[_n_f64]_z + /// public static unsafe Vector MultiplyAdd(Vector addend, Vector left, sbyte right); // svmla[_n_s8]_m or svmla[_n_s8]_x or svmla[_n_s8]_z + /// public static unsafe Vector MultiplyAdd(Vector addend, Vector left, short right); // svmla[_n_s16]_m or svmla[_n_s16]_x or svmla[_n_s16]_z + /// public static unsafe Vector MultiplyAdd(Vector addend, Vector left, int right); // svmla[_n_s32]_m or svmla[_n_s32]_x or svmla[_n_s32]_z + /// public static unsafe Vector MultiplyAdd(Vector addend, Vector left, long right); // svmla[_n_s64]_m or svmla[_n_s64]_x 
or svmla[_n_s64]_z + /// public static unsafe Vector MultiplyAdd(Vector addend, Vector left, byte right); // svmla[_n_u8]_m or svmla[_n_u8]_x or svmla[_n_u8]_z + /// public static unsafe Vector MultiplyAdd(Vector addend, Vector left, ushort right); // svmla[_n_u16]_m or svmla[_n_u16]_x or svmla[_n_u16]_z + /// public static unsafe Vector MultiplyAdd(Vector addend, Vector left, uint right); // svmla[_n_u32]_m or svmla[_n_u32]_x or svmla[_n_u32]_z + /// public static unsafe Vector MultiplyAdd(Vector addend, Vector left, ulong right); // svmla[_n_u64]_m or svmla[_n_u64]_x or svmla[_n_u64]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmad[_f32]_m or svmad[_f32]_x or svmad[_f32]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmad[_f64]_m or svmad[_f64]_x or svmad[_f64]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmad[_s8]_m or svmad[_s8]_x or svmad[_s8]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmad[_s16]_m or svmad[_s16]_x or svmad[_s16]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmad[_s32]_m or svmad[_s32]_x or svmad[_s32]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmad[_s64]_m or svmad[_s64]_x or svmad[_s64]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmad[_u8]_m or svmad[_u8]_x or svmad[_u8]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmad[_u16]_m or svmad[_u16]_x or svmad[_u16]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmad[_u32]_m or svmad[_u32]_x or svmad[_u32]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmad[_u64]_m or svmad[_u64]_x or svmad[_u64]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, float op3); // svmad[_n_f32]_m or svmad[_n_f32]_x or svmad[_n_f32]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, double op3); // svmad[_n_f64]_m or svmad[_n_f64]_x or svmad[_n_f64]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, sbyte op3); // svmad[_n_s8]_m or svmad[_n_s8]_x or svmad[_n_s8]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, short op3); // svmad[_n_s16]_m or svmad[_n_s16]_x or svmad[_n_s16]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, int op3); // svmad[_n_s32]_m or svmad[_n_s32]_x or svmad[_n_s32]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, long op3); // svmad[_n_s64]_m or svmad[_n_s64]_x or svmad[_n_s64]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, byte op3); // svmad[_n_u8]_m or svmad[_n_u8]_x or svmad[_n_u8]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, ushort op3); // svmad[_n_u16]_m or svmad[_n_u16]_x or svmad[_n_u16]_z + /// public static unsafe Vector MultiplyAddMultiplicandFirst(Vector op1, Vector op2, uint op3); // svmad[_n_u32]_m or svmad[_n_u32]_x or svmad[_n_u32]_z + /// public static unsafe Vector 
MultiplyAddMultiplicandFirst(Vector op1, Vector op2, ulong op3); // svmad[_n_u64]_m or svmad[_n_u64]_x or svmad[_n_u64]_z + /// public static unsafe Vector MultiplyAddNegated(Vector addend, Vector left, float right); // svnmla[_n_f32]_m or svnmla[_n_f32]_x or svnmla[_n_f32]_z + /// public static unsafe Vector MultiplyAddNegated(Vector addend, Vector left, double right); // svnmla[_n_f64]_m or svnmla[_n_f64]_x or svnmla[_n_f64]_z + /// public static unsafe Vector MultiplyExtended(Vector left, float right); // svmulx[_n_f32]_m or svmulx[_n_f32]_x or svmulx[_n_f32]_z + /// public static unsafe Vector MultiplyExtended(Vector left, double right); // svmulx[_n_f64]_m or svmulx[_n_f64]_x or svmulx[_n_f64]_z + /// public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, float right); // svmls[_n_f32]_m or svmls[_n_f32]_x or svmls[_n_f32]_z + /// public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, double right); // svmls[_n_f64]_m or svmls[_n_f64]_x or svmls[_n_f64]_z + /// public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, sbyte right); // svmls[_n_s8]_m or svmls[_n_s8]_x or svmls[_n_s8]_z + /// public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, short right); // svmls[_n_s16]_m or svmls[_n_s16]_x or svmls[_n_s16]_z + /// public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, int right); // svmls[_n_s32]_m or svmls[_n_s32]_x or svmls[_n_s32]_z + /// public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, long right); // svmls[_n_s64]_m or svmls[_n_s64]_x or svmls[_n_s64]_z + /// public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, byte right); // svmls[_n_u8]_m or svmls[_n_u8]_x or svmls[_n_u8]_z + /// public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, ushort right); // svmls[_n_u16]_m or svmls[_n_u16]_x or svmls[_n_u16]_z + /// public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, uint right); // svmls[_n_u32]_m or svmls[_n_u32]_x or svmls[_n_u32]_z + /// public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, ulong right); // svmls[_n_u64]_m or svmls[_n_u64]_x or svmls[_n_u64]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmsb[_f32]_m or svmsb[_f32]_x or svmsb[_f32]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmsb[_f64]_m or svmsb[_f64]_x or svmsb[_f64]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmsb[_s8]_m or svmsb[_s8]_x or svmsb[_s8]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmsb[_s16]_m or svmsb[_s16]_x or svmsb[_s16]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmsb[_s32]_m or svmsb[_s32]_x or svmsb[_s32]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmsb[_s64]_m or svmsb[_s64]_x or svmsb[_s64]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmsb[_u8]_m or svmsb[_u8]_x or svmsb[_u8]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmsb[_u16]_m or svmsb[_u16]_x or svmsb[_u16]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, Vector 
op3); // svmsb[_u32]_m or svmsb[_u32]_x or svmsb[_u32]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svmsb[_u64]_m or svmsb[_u64]_x or svmsb[_u64]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, float op3); // svmsb[_n_f32]_m or svmsb[_n_f32]_x or svmsb[_n_f32]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, double op3); // svmsb[_n_f64]_m or svmsb[_n_f64]_x or svmsb[_n_f64]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, sbyte op3); // svmsb[_n_s8]_m or svmsb[_n_s8]_x or svmsb[_n_s8]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, short op3); // svmsb[_n_s16]_m or svmsb[_n_s16]_x or svmsb[_n_s16]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, int op3); // svmsb[_n_s32]_m or svmsb[_n_s32]_x or svmsb[_n_s32]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, long op3); // svmsb[_n_s64]_m or svmsb[_n_s64]_x or svmsb[_n_s64]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, byte op3); // svmsb[_n_u8]_m or svmsb[_n_u8]_x or svmsb[_n_u8]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, ushort op3); // svmsb[_n_u16]_m or svmsb[_n_u16]_x or svmsb[_n_u16]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, uint op3); // svmsb[_n_u32]_m or svmsb[_n_u32]_x or svmsb[_n_u32]_z + /// public static unsafe Vector MultiplySubtractMultiplicandFirst(Vector op1, Vector op2, ulong op3); // svmsb[_n_u64]_m or svmsb[_n_u64]_x or svmsb[_n_u64]_z + /// public static unsafe Vector MultiplySubtractNegated(Vector minuend, Vector left, float right); // svnmls[_n_f32]_m or svnmls[_n_f32]_x or svnmls[_n_f32]_z + /// public static unsafe Vector MultiplySubtractNegated(Vector minuend, Vector left, double right); // svnmls[_n_f64]_m or svnmls[_n_f64]_x or svnmls[_n_f64]_z + /// public static unsafe Vector NegateMultiplyAddMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svnmad[_f32]_m or svnmad[_f32]_x or svnmad[_f32]_z + /// public static unsafe Vector NegateMultiplyAddMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svnmad[_f64]_m or svnmad[_f64]_x or svnmad[_f64]_z + /// public static unsafe Vector NegateMultiplyAddMultiplicandFirst(Vector op1, Vector op2, float op3); // svnmad[_n_f32]_m or svnmad[_n_f32]_x or svnmad[_n_f32]_z + /// public static unsafe Vector NegateMultiplyAddMultiplicandFirst(Vector op1, Vector op2, double op3); // svnmad[_n_f64]_m or svnmad[_n_f64]_x or svnmad[_n_f64]_z + /// public static unsafe Vector NegateMultiplySubtractMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svnmsb[_f32]_m or svnmsb[_f32]_x or svnmsb[_f32]_z + /// public static unsafe Vector NegateMultiplySubtractMultiplicandFirst(Vector op1, Vector op2, Vector op3); // svnmsb[_f64]_m or svnmsb[_f64]_x or svnmsb[_f64]_z + /// public static unsafe Vector NegateMultiplySubtractMultiplicandFirst(Vector op1, Vector op2, float op3); // svnmsb[_n_f32]_m or svnmsb[_n_f32]_x or svnmsb[_n_f32]_z + /// public static unsafe Vector NegateMultiplySubtractMultiplicandFirst(Vector op1, Vector op2, double op3); // svnmsb[_n_f64]_m or svnmsb[_n_f64]_x or svnmsb[_n_f64]_z + /// public static unsafe Vector Subtract(Vector left, float right); // svsub[_n_f32]_m or 
svsub[_n_f32]_x or svsub[_n_f32]_z + /// public static unsafe Vector Subtract(Vector left, double right); // svsub[_n_f64]_m or svsub[_n_f64]_x or svsub[_n_f64]_z + /// public static unsafe Vector Subtract(Vector left, sbyte right); // svsub[_n_s8]_m or svsub[_n_s8]_x or svsub[_n_s8]_z + /// public static unsafe Vector Subtract(Vector left, short right); // svsub[_n_s16]_m or svsub[_n_s16]_x or svsub[_n_s16]_z + /// public static unsafe Vector Subtract(Vector left, int right); // svsub[_n_s32]_m or svsub[_n_s32]_x or svsub[_n_s32]_z + /// public static unsafe Vector Subtract(Vector left, long right); // svsub[_n_s64]_m or svsub[_n_s64]_x or svsub[_n_s64]_z + /// public static unsafe Vector Subtract(Vector left, byte right); // svsub[_n_u8]_m or svsub[_n_u8]_x or svsub[_n_u8]_z + /// public static unsafe Vector Subtract(Vector left, ushort right); // svsub[_n_u16]_m or svsub[_n_u16]_x or svsub[_n_u16]_z + /// public static unsafe Vector Subtract(Vector left, uint right); // svsub[_n_u32]_m or svsub[_n_u32]_x or svsub[_n_u32]_z + /// public static unsafe Vector Subtract(Vector left, ulong right); // svsub[_n_u64]_m or svsub[_n_u64]_x or svsub[_n_u64]_z + /// public static unsafe Vector SubtractReversed(Vector left, Vector right); // svsubr[_f32]_m or svsubr[_f32]_x or svsubr[_f32]_z + /// public static unsafe Vector SubtractReversed(Vector left, Vector right); // svsubr[_f64]_m or svsubr[_f64]_x or svsubr[_f64]_z + /// public static unsafe Vector SubtractReversed(Vector left, Vector right); // svsubr[_s8]_m or svsubr[_s8]_x or svsubr[_s8]_z + /// public static unsafe Vector SubtractReversed(Vector left, Vector right); // svsubr[_s16]_m or svsubr[_s16]_x or svsubr[_s16]_z + /// public static unsafe Vector SubtractReversed(Vector left, Vector right); // svsubr[_s32]_m or svsubr[_s32]_x or svsubr[_s32]_z + /// public static unsafe Vector SubtractReversed(Vector left, Vector right); // svsubr[_s64]_m or svsubr[_s64]_x or svsubr[_s64]_z + /// public static unsafe Vector SubtractReversed(Vector left, Vector right); // svsubr[_u8]_m or svsubr[_u8]_x or svsubr[_u8]_z + /// public static unsafe Vector SubtractReversed(Vector left, Vector right); // svsubr[_u16]_m or svsubr[_u16]_x or svsubr[_u16]_z + /// public static unsafe Vector SubtractReversed(Vector left, Vector right); // svsubr[_u32]_m or svsubr[_u32]_x or svsubr[_u32]_z + /// public static unsafe Vector SubtractReversed(Vector left, Vector right); // svsubr[_u64]_m or svsubr[_u64]_x or svsubr[_u64]_z + /// public static unsafe Vector SubtractReversed(Vector left, float right); // svsubr[_n_f32]_m or svsubr[_n_f32]_x or svsubr[_n_f32]_z + /// public static unsafe Vector SubtractReversed(Vector left, double right); // svsubr[_n_f64]_m or svsubr[_n_f64]_x or svsubr[_n_f64]_z + /// public static unsafe Vector SubtractReversed(Vector left, sbyte right); // svsubr[_n_s8]_m or svsubr[_n_s8]_x or svsubr[_n_s8]_z + /// public static unsafe Vector SubtractReversed(Vector left, short right); // svsubr[_n_s16]_m or svsubr[_n_s16]_x or svsubr[_n_s16]_z + /// public static unsafe Vector SubtractReversed(Vector left, int right); // svsubr[_n_s32]_m or svsubr[_n_s32]_x or svsubr[_n_s32]_z + /// public static unsafe Vector SubtractReversed(Vector left, long right); // svsubr[_n_s64]_m or svsubr[_n_s64]_x or svsubr[_n_s64]_z + /// public static unsafe Vector SubtractReversed(Vector left, byte right); // svsubr[_n_u8]_m or svsubr[_n_u8]_x or svsubr[_n_u8]_z + /// public static unsafe Vector SubtractReversed(Vector left, ushort right); // svsubr[_n_u16]_m or 
svsubr[_n_u16]_x or svsubr[_n_u16]_z + /// public static unsafe Vector SubtractReversed(Vector left, uint right); // svsubr[_n_u32]_m or svsubr[_n_u32]_x or svsubr[_n_u32]_z + /// public static unsafe Vector SubtractReversed(Vector left, ulong right); // svsubr[_n_u64]_m or svsubr[_n_u64]_x or svsubr[_n_u64]_z + /// public static unsafe Vector SubtractSaturate(Vector left, sbyte right); // svqsub[_n_s8] + /// public static unsafe Vector SubtractSaturate(Vector left, short right); // svqsub[_n_s16] + /// public static unsafe Vector SubtractSaturate(Vector left, int right); // svqsub[_n_s32] + /// public static unsafe Vector SubtractSaturate(Vector left, long right); // svqsub[_n_s64] + /// public static unsafe Vector SubtractSaturate(Vector left, byte right); // svqsub[_n_u8] + /// public static unsafe Vector SubtractSaturate(Vector left, ushort right); // svqsub[_n_u16] + /// public static unsafe Vector SubtractSaturate(Vector left, uint right); // svqsub[_n_u32] + /// public static unsafe Vector SubtractSaturate(Vector left, ulong right); // svqsub[_n_u64] + /// Total Rejected: 196 + + /// Total ACLE covered across API: 1038 + diff --git a/sve_api/out_api/apiraw_FEAT_SVE__scatterstores.cs b/sve_api/out_api/apiraw_FEAT_SVE__scatterstores.cs new file mode 100644 index 0000000000000..74171ac21475c --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE__scatterstores.cs @@ -0,0 +1,333 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: scatterstores +{ + + /// T: [float, uint], [int, uint], [uint, uint], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data); // ST1W or ST1D + + /// T: [float, int], [int, int], [uint, int], [float, uint], [int, uint], [uint, uint], [double, long], [long, long], [ulong, long], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe void Scatter(Vector mask, T* address, Vector indices, Vector data); // ST1W or ST1D + + /// T: [int, uint], [long, ulong] + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1H + + /// T: uint, ulong + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1H + + /// T: int, long + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); // ST1H + + /// T: [uint, int], [ulong, long] + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); // ST1H + + /// T: [int, uint], [long, ulong] + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); // ST1H + + /// T: uint, ulong + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); // ST1H + + /// T: int, long + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data); // ST1H + + /// T: [uint, int], [ulong, long] + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data); // ST1H + + /// T: [int, uint], [long, ulong] + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data); // ST1H + + /// T: uint, ulong + public static unsafe void 
Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1W + + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data); // ST1W + + /// T: [int, uint], [long, ulong] + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1B + + /// T: uint, ulong + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1B + + /// T: int, long + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); // ST1B + + /// T: [uint, int], [ulong, long] + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); // ST1B + + /// T: [int, uint], [long, ulong] + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); // ST1B + + /// T: uint, ulong + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); // ST1B + + /// total method signatures: 28 + +} + + +/// Full API +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: scatterstores +{ + /// Scatter : Non-truncating store + + /// void svst1_scatter[_u32base_f32](svbool_t pg, svuint32_t bases, svfloat32_t data) : "ST1W Zdata.S, Pg, [Zbases.S, #0]" + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data); + + /// void svst1_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) : "ST1W Zdata.S, Pg, [Zbases.S, #0]" + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data); + + /// void svst1_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) : "ST1W Zdata.S, Pg, [Zbases.S, #0]" + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data); + + /// void svst1_scatter[_u64base_f64](svbool_t pg, svuint64_t bases, svfloat64_t data) : "ST1D Zdata.D, Pg, [Zbases.D, #0]" + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data); + + /// void svst1_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) : "ST1D Zdata.D, Pg, [Zbases.D, #0]" + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data); + + /// void 
svst1_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) : "ST1D Zdata.D, Pg, [Zbases.D, #0]" + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data); + + /// void svst1_scatter_[s32]offset[_f32](svbool_t pg, float32_t *base, svint32_t offsets, svfloat32_t data) : "ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]" + /// void svst1_scatter_[s32]index[_f32](svbool_t pg, float32_t *base, svint32_t indices, svfloat32_t data) : "ST1W Zdata.S, Pg, [Xbase, Zindices.S, SXTW #2]" + public static unsafe void Scatter(Vector mask, float* address, Vector indices, Vector data); + + /// void svst1_scatter_[s32]offset[_s32](svbool_t pg, int32_t *base, svint32_t offsets, svint32_t data) : "ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]" + /// void svst1_scatter_[s32]index[_s32](svbool_t pg, int32_t *base, svint32_t indices, svint32_t data) : "ST1W Zdata.S, Pg, [Xbase, Zindices.S, SXTW #2]" + public static unsafe void Scatter(Vector mask, int* address, Vector indices, Vector data); + + /// void svst1_scatter_[s32]offset[_u32](svbool_t pg, uint32_t *base, svint32_t offsets, svuint32_t data) : "ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]" + /// void svst1_scatter_[s32]index[_u32](svbool_t pg, uint32_t *base, svint32_t indices, svuint32_t data) : "ST1W Zdata.S, Pg, [Xbase, Zindices.S, SXTW #2]" + public static unsafe void Scatter(Vector mask, uint* address, Vector indices, Vector data); + + /// void svst1_scatter_[u32]offset[_f32](svbool_t pg, float32_t *base, svuint32_t offsets, svfloat32_t data) : "ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]" + /// void svst1_scatter_[u32]index[_f32](svbool_t pg, float32_t *base, svuint32_t indices, svfloat32_t data) : "ST1W Zdata.S, Pg, [Xbase, Zindices.S, UXTW #2]" + public static unsafe void Scatter(Vector mask, float* address, Vector indices, Vector data); + + /// void svst1_scatter_[u32]offset[_s32](svbool_t pg, int32_t *base, svuint32_t offsets, svint32_t data) : "ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]" + /// void svst1_scatter_[u32]index[_s32](svbool_t pg, int32_t *base, svuint32_t indices, svint32_t data) : "ST1W Zdata.S, Pg, [Xbase, Zindices.S, UXTW #2]" + public static unsafe void Scatter(Vector mask, int* address, Vector indices, Vector data); + + /// void svst1_scatter_[u32]offset[_u32](svbool_t pg, uint32_t *base, svuint32_t offsets, svuint32_t data) : "ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]" + /// void svst1_scatter_[u32]index[_u32](svbool_t pg, uint32_t *base, svuint32_t indices, svuint32_t data) : "ST1W Zdata.S, Pg, [Xbase, Zindices.S, UXTW #2]" + public static unsafe void Scatter(Vector mask, uint* address, Vector indices, Vector data); + + /// void svst1_scatter_[s64]offset[_f64](svbool_t pg, float64_t *base, svint64_t offsets, svfloat64_t data) : "ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]" + /// void svst1_scatter_[s64]index[_f64](svbool_t pg, float64_t *base, svint64_t indices, svfloat64_t data) : "ST1D Zdata.D, Pg, [Xbase, Zindices.D, LSL #3]" + public static unsafe void Scatter(Vector mask, double* address, Vector indices, Vector data); + + /// void svst1_scatter_[s64]offset[_s64](svbool_t pg, int64_t *base, svint64_t offsets, svint64_t data) : "ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]" + /// void svst1_scatter_[s64]index[_s64](svbool_t pg, int64_t *base, svint64_t indices, svint64_t data) : "ST1D Zdata.D, Pg, [Xbase, Zindices.D, LSL #3]" + public static unsafe void Scatter(Vector mask, long* address, Vector indices, Vector data); + + 
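As a usage illustration (an editorial sketch against the proposed surface above, not part of the generated listing): the base-plus-vector overloads store each active lane i to address[indices[i]], with the index scaled by the element size as in the SXTW #2 / LSL #3 addressing forms. Assuming Vector<T> is the sizeless SVE vector type and that a CreateTrueMaskInt32 helper from the mask-creation category is available:

    static unsafe void ScatterByIndex(int* address, Vector<int> indices, Vector<int> data)
    {
        if (Sve.IsSupported)
        {
            Vector<int> mask = Sve.CreateTrueMaskInt32();   // assumed helper: all lanes active
            // Per active lane i: address[indices[i]] = data[i]
            // ("ST1W Zdata.S, Pg, [Xbase, Zindices.S, SXTW #2]").
            Sve.Scatter(mask, address, indices, data);
        }
    }
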
/// void svst1_scatter_[s64]offset[_u64](svbool_t pg, uint64_t *base, svint64_t offsets, svuint64_t data) : "ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]" + /// void svst1_scatter_[s64]index[_u64](svbool_t pg, uint64_t *base, svint64_t indices, svuint64_t data) : "ST1D Zdata.D, Pg, [Xbase, Zindices.D, LSL #3]" + public static unsafe void Scatter(Vector mask, ulong* address, Vector indices, Vector data); + + /// void svst1_scatter_[u64]offset[_f64](svbool_t pg, float64_t *base, svuint64_t offsets, svfloat64_t data) : "ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]" + /// void svst1_scatter_[u64]index[_f64](svbool_t pg, float64_t *base, svuint64_t indices, svfloat64_t data) : "ST1D Zdata.D, Pg, [Xbase, Zindices.D, LSL #3]" + public static unsafe void Scatter(Vector mask, double* address, Vector indices, Vector data); + + /// void svst1_scatter_[u64]offset[_s64](svbool_t pg, int64_t *base, svuint64_t offsets, svint64_t data) : "ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]" + /// void svst1_scatter_[u64]index[_s64](svbool_t pg, int64_t *base, svuint64_t indices, svint64_t data) : "ST1D Zdata.D, Pg, [Xbase, Zindices.D, LSL #3]" + public static unsafe void Scatter(Vector mask, long* address, Vector indices, Vector data); + + /// void svst1_scatter_[u64]offset[_u64](svbool_t pg, uint64_t *base, svuint64_t offsets, svuint64_t data) : "ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]" + /// void svst1_scatter_[u64]index[_u64](svbool_t pg, uint64_t *base, svuint64_t indices, svuint64_t data) : "ST1D Zdata.D, Pg, [Xbase, Zindices.D, LSL #3]" + public static unsafe void Scatter(Vector mask, ulong* address, Vector indices, Vector data); + + + /// Scatter16BitNarrowing : Truncate to 16 bits and store + + /// void svst1h_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) : "ST1H Zdata.S, Pg, [Zbases.S, #0]" + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); + + /// void svst1h_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) : "ST1H Zdata.S, Pg, [Zbases.S, #0]" + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); + + /// void svst1h_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) : "ST1H Zdata.D, Pg, [Zbases.D, #0]" + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); + + /// void svst1h_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) : "ST1H Zdata.D, Pg, [Zbases.D, #0]" + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); + + + /// Scatter16BitWithByteOffsetsNarrowing : Truncate to 16 bits and store + + /// void svst1h_scatter_[s32]offset[_s32](svbool_t pg, int16_t *base, svint32_t offsets, svint32_t data) : "ST1H Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); + + /// void svst1h_scatter_[s32]offset[_u32](svbool_t pg, uint16_t *base, svint32_t offsets, svuint32_t data) : "ST1H Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); + + /// void svst1h_scatter_[u32]offset[_s32](svbool_t pg, int16_t *base, svuint32_t offsets, svint32_t data) : "ST1H Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); + + /// void svst1h_scatter_[u32]offset[_u32](svbool_t pg, uint16_t *base, svuint32_t offsets, svuint32_t data) : "ST1H 
Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); + + /// void svst1h_scatter_[s64]offset[_s64](svbool_t pg, int16_t *base, svint64_t offsets, svint64_t data) : "ST1H Zdata.D, Pg, [Xbase, Zoffsets.D]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); + + /// void svst1h_scatter_[s64]offset[_u64](svbool_t pg, uint16_t *base, svint64_t offsets, svuint64_t data) : "ST1H Zdata.D, Pg, [Xbase, Zoffsets.D]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); + + /// void svst1h_scatter_[u64]offset[_s64](svbool_t pg, int16_t *base, svuint64_t offsets, svint64_t data) : "ST1H Zdata.D, Pg, [Xbase, Zoffsets.D]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); + + /// void svst1h_scatter_[u64]offset[_u64](svbool_t pg, uint16_t *base, svuint64_t offsets, svuint64_t data) : "ST1H Zdata.D, Pg, [Xbase, Zoffsets.D]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); + + /// void svst1h_scatter_[s32]index[_s32](svbool_t pg, int16_t *base, svint32_t indices, svint32_t data) : "ST1H Zdata.S, Pg, [Xbase, Zindices.S, SXTW #1]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data); + + /// void svst1h_scatter_[s32]index[_u32](svbool_t pg, uint16_t *base, svint32_t indices, svuint32_t data) : "ST1H Zdata.S, Pg, [Xbase, Zindices.S, SXTW #1]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data); + + /// void svst1h_scatter_[u32]index[_s32](svbool_t pg, int16_t *base, svuint32_t indices, svint32_t data) : "ST1H Zdata.S, Pg, [Xbase, Zindices.S, UXTW #1]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data); + + /// void svst1h_scatter_[u32]index[_u32](svbool_t pg, uint16_t *base, svuint32_t indices, svuint32_t data) : "ST1H Zdata.S, Pg, [Xbase, Zindices.S, UXTW #1]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data); + + /// void svst1h_scatter_[s64]index[_s64](svbool_t pg, int16_t *base, svint64_t indices, svint64_t data) : "ST1H Zdata.D, Pg, [Xbase, Zindices.D, LSL #1]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data); + + /// void svst1h_scatter_[s64]index[_u64](svbool_t pg, uint16_t *base, svint64_t indices, svuint64_t data) : "ST1H Zdata.D, Pg, [Xbase, Zindices.D, LSL #1]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data); + + /// void svst1h_scatter_[u64]index[_s64](svbool_t pg, int16_t *base, svuint64_t indices, svint64_t data) : "ST1H Zdata.D, Pg, [Xbase, Zindices.D, LSL #1]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data); + + /// void svst1h_scatter_[u64]index[_u64](svbool_t pg, uint16_t *base, svuint64_t indices, svuint64_t data) : "ST1H Zdata.D, Pg, [Xbase, Zindices.D, LSL #1]" + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data); + + + 
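A matching editorial sketch for the narrowing scatters above, under the same assumptions as the earlier Scatter sketch (CreateTrueMaskInt32 is an assumed helper): each active 32-bit lane is truncated to its low 16 bits before being scattered through base-plus-index addressing.

    static unsafe void ScatterNarrowToInt16(short* address, Vector<int> indices, Vector<int> data)
    {
        if (Sve.IsSupported)
        {
            Vector<int> mask = Sve.CreateTrueMaskInt32();   // assumed helper
            // Per active lane i: address[indices[i]] = (short)data[i]
            // ("ST1H Zdata.S, Pg, [Xbase, Zindices.S, SXTW #1]").
            Sve.Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data);
        }
    }
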
/// Scatter32BitNarrowing : Truncate to 32 bits and store + + /// void svst1w_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) : "ST1W Zdata.D, Pg, [Zbases.D, #0]" + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data); + + /// void svst1w_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) : "ST1W Zdata.D, Pg, [Zbases.D, #0]" + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data); + + + /// Scatter32BitWithByteOffsetsNarrowing : Truncate to 32 bits and store + + /// void svst1w_scatter_[s64]offset[_s64](svbool_t pg, int32_t *base, svint64_t offsets, svint64_t data) : "ST1W Zdata.D, Pg, [Xbase, Zoffsets.D]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data); + + /// void svst1w_scatter_[s64]offset[_u64](svbool_t pg, uint32_t *base, svint64_t offsets, svuint64_t data) : "ST1W Zdata.D, Pg, [Xbase, Zoffsets.D]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data); + + /// void svst1w_scatter_[u64]offset[_s64](svbool_t pg, int32_t *base, svuint64_t offsets, svint64_t data) : "ST1W Zdata.D, Pg, [Xbase, Zoffsets.D]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data); + + /// void svst1w_scatter_[u64]offset[_u64](svbool_t pg, uint32_t *base, svuint64_t offsets, svuint64_t data) : "ST1W Zdata.D, Pg, [Xbase, Zoffsets.D]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data); + + /// void svst1w_scatter_[s64]index[_s64](svbool_t pg, int32_t *base, svint64_t indices, svint64_t data) : "ST1W Zdata.D, Pg, [Xbase, Zindices.D, LSL #2]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data); + + /// void svst1w_scatter_[s64]index[_u64](svbool_t pg, uint32_t *base, svint64_t indices, svuint64_t data) : "ST1W Zdata.D, Pg, [Xbase, Zindices.D, LSL #2]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data); + + /// void svst1w_scatter_[u64]index[_s64](svbool_t pg, int32_t *base, svuint64_t indices, svint64_t data) : "ST1W Zdata.D, Pg, [Xbase, Zindices.D, LSL #2]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data); + + /// void svst1w_scatter_[u64]index[_u64](svbool_t pg, uint32_t *base, svuint64_t indices, svuint64_t data) : "ST1W Zdata.D, Pg, [Xbase, Zindices.D, LSL #2]" + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data); + + + /// Scatter8BitNarrowing : Truncate to 8 bits and store + + /// void svst1b_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) : "ST1B Zdata.S, Pg, [Zbases.S, #0]" + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); + + /// void svst1b_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) : "ST1B Zdata.S, Pg, [Zbases.S, #0]" + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); + + /// void svst1b_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) : "ST1B Zdata.D, Pg, [Zbases.D, #0]" + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); + + /// void 
svst1b_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) : "ST1B Zdata.D, Pg, [Zbases.D, #0]" + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); + + + /// Scatter8BitWithByteOffsetsNarrowing : Truncate to 8 bits and store + + /// void svst1b_scatter_[s32]offset[_s32](svbool_t pg, int8_t *base, svint32_t offsets, svint32_t data) : "ST1B Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]" + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); + + /// void svst1b_scatter_[s32]offset[_u32](svbool_t pg, uint8_t *base, svint32_t offsets, svuint32_t data) : "ST1B Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]" + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); + + /// void svst1b_scatter_[u32]offset[_s32](svbool_t pg, int8_t *base, svuint32_t offsets, svint32_t data) : "ST1B Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]" + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); + + /// void svst1b_scatter_[u32]offset[_u32](svbool_t pg, uint8_t *base, svuint32_t offsets, svuint32_t data) : "ST1B Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]" + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); + + /// void svst1b_scatter_[s64]offset[_s64](svbool_t pg, int8_t *base, svint64_t offsets, svint64_t data) : "ST1B Zdata.D, Pg, [Xbase, Zoffsets.D]" + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); + + /// void svst1b_scatter_[s64]offset[_u64](svbool_t pg, uint8_t *base, svint64_t offsets, svuint64_t data) : "ST1B Zdata.D, Pg, [Xbase, Zoffsets.D]" + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); + + /// void svst1b_scatter_[u64]offset[_s64](svbool_t pg, int8_t *base, svuint64_t offsets, svint64_t data) : "ST1B Zdata.D, Pg, [Xbase, Zoffsets.D]" + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); + + /// void svst1b_scatter_[u64]offset[_u64](svbool_t pg, uint8_t *base, svuint64_t offsets, svuint64_t data) : "ST1B Zdata.D, Pg, [Xbase, Zoffsets.D]" + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); + + + /// total method signatures: 60 + /// total method names: 7 +} + + + /// Rejected: + /// public static unsafe void Scatter(Vector mask, Vector address, long indices, Vector data); // svst1_scatter[_u32base]_offset[_f32] or svst1_scatter[_u32base]_index[_f32] + /// public static unsafe void Scatter(Vector mask, Vector address, long indices, Vector data); // svst1_scatter[_u32base]_offset[_s32] or svst1_scatter[_u32base]_index[_s32] + /// public static unsafe void Scatter(Vector mask, Vector address, long indices, Vector data); // svst1_scatter[_u32base]_offset[_u32] or svst1_scatter[_u32base]_index[_u32] + /// public static unsafe void Scatter(Vector mask, Vector address, long indices, Vector data); // svst1_scatter[_u64base]_offset[_f64] or svst1_scatter[_u64base]_index[_f64] + /// public static unsafe void Scatter(Vector mask, Vector address, long indices, Vector data); // svst1_scatter[_u64base]_offset[_s64] or svst1_scatter[_u64base]_index[_s64] + /// public static unsafe void Scatter(Vector mask, Vector address, long 
indices, Vector data); // svst1_scatter[_u64base]_offset[_u64] or svst1_scatter[_u64base]_index[_u64] + /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svst1h_scatter[_u32base]_offset[_s32] + /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svst1h_scatter[_u32base]_offset[_u32] + /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svst1h_scatter[_u64base]_offset[_s64] + /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svst1h_scatter[_u64base]_offset[_u64] + /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long index, Vector data); // svst1h_scatter[_u32base]_index[_s32] + /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long index, Vector data); // svst1h_scatter[_u32base]_index[_u32] + /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long index, Vector data); // svst1h_scatter[_u64base]_index[_s64] + /// public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, Vector address, long index, Vector data); // svst1h_scatter[_u64base]_index[_u64] + /// public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svst1w_scatter[_u64base]_offset[_s64] + /// public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svst1w_scatter[_u64base]_offset[_u64] + /// public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, Vector address, long index, Vector data); // svst1w_scatter[_u64base]_index[_s64] + /// public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, Vector address, long index, Vector data); // svst1w_scatter[_u64base]_index[_u64] + /// public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svst1b_scatter[_u32base]_offset[_s32] + /// public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svst1b_scatter[_u32base]_offset[_u32] + /// public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svst1b_scatter[_u64base]_offset[_s64] + /// public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, Vector address, long offset, Vector data); // svst1b_scatter[_u64base]_offset[_u64] + /// Total Rejected: 22 + + /// Total ACLE covered across API: 100 + diff --git a/sve_api/out_api/apiraw_FEAT_SVE__stores.cs b/sve_api/out_api/apiraw_FEAT_SVE__stores.cs new file mode 100644 index 0000000000000..65cabb49fa4cc --- /dev/null +++ b/sve_api/out_api/apiraw_FEAT_SVE__stores.cs @@ -0,0 +1,312 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: stores +{ + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void Store(Vector mask, T* address, Vector data); // ST1W or ST1D or ST1B or ST1H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void Store(Vector mask, T* address, (Vector Value1, Vector Value2) 
data); // ST2W or ST2D or ST2B or ST2H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void Store(Vector mask, T* address, (Vector Value1, Vector Value2, Vector Value3) data); // ST3W or ST3D or ST3B or ST3H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void Store(Vector mask, T* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); // ST4W or ST4D or ST4B or ST4H + + /// T: short, int, long + public static unsafe void StoreNarrowing(Vector mask, sbyte* address, Vector data); // ST1B + + /// T: ushort, uint, ulong + public static unsafe void StoreNarrowing(Vector mask, byte* address, Vector data); // ST1B + + /// T: int, long + public static unsafe void StoreNarrowing(Vector mask, short* address, Vector data); // ST1H + + /// T: uint, ulong + public static unsafe void StoreNarrowing(Vector mask, ushort* address, Vector data); // ST1H + + public static unsafe void StoreNarrowing(Vector mask, int* address, Vector data); // ST1W + + public static unsafe void StoreNarrowing(Vector mask, uint* address, Vector data); // ST1W + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void StoreNonTemporal(Vector mask, T* address, Vector data); // STNT1W or STNT1D or STNT1B or STNT1H + + /// total method signatures: 11 + +} + + +/// Full API +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: stores +{ + /// Store : Non-truncating store + + /// void svst1[_f32](svbool_t pg, float32_t *base, svfloat32_t data) : "ST1W Zdata.S, Pg, [Xarray, Xindex, LSL #2]" or "ST1W Zdata.S, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, float* address, Vector data); + + /// void svst1[_f64](svbool_t pg, float64_t *base, svfloat64_t data) : "ST1D Zdata.D, Pg, [Xarray, Xindex, LSL #3]" or "ST1D Zdata.D, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, double* address, Vector data); + + /// void svst1[_s8](svbool_t pg, int8_t *base, svint8_t data) : "ST1B Zdata.B, Pg, [Xarray, Xindex]" or "ST1B Zdata.B, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, sbyte* address, Vector data); + + /// void svst1[_s16](svbool_t pg, int16_t *base, svint16_t data) : "ST1H Zdata.H, Pg, [Xarray, Xindex, LSL #1]" or "ST1H Zdata.H, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, short* address, Vector data); + + /// void svst1[_s32](svbool_t pg, int32_t *base, svint32_t data) : "ST1W Zdata.S, Pg, [Xarray, Xindex, LSL #2]" or "ST1W Zdata.S, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, int* address, Vector data); + + /// void svst1[_s64](svbool_t pg, int64_t *base, svint64_t data) : "ST1D Zdata.D, Pg, [Xarray, Xindex, LSL #3]" or "ST1D Zdata.D, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, long* address, Vector data); + + /// void svst1[_u8](svbool_t pg, uint8_t *base, svuint8_t data) : "ST1B Zdata.B, Pg, [Xarray, Xindex]" or "ST1B Zdata.B, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, byte* address, Vector data); + + /// void svst1[_u16](svbool_t pg, uint16_t *base, svuint16_t data) : "ST1H Zdata.H, Pg, [Xarray, Xindex, LSL #1]" or "ST1H Zdata.H, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, ushort* address, Vector data); + + /// void svst1[_u32](svbool_t pg, uint32_t *base, svuint32_t data) : "ST1W Zdata.S, Pg, [Xarray, Xindex, LSL #2]" or "ST1W 
Zdata.S, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, uint* address, Vector data); + + /// void svst1[_u64](svbool_t pg, uint64_t *base, svuint64_t data) : "ST1D Zdata.D, Pg, [Xarray, Xindex, LSL #3]" or "ST1D Zdata.D, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, ulong* address, Vector data); + + /// void svst2[_f32](svbool_t pg, float32_t *base, svfloat32x2_t data) : "ST2W {Zdata0.S, Zdata1.S}, Pg, [Xarray, Xindex, LSL #2]" or "ST2W {Zdata0.S, Zdata1.S}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, float* address, (Vector Value1, Vector Value2) data); + + /// void svst2[_f64](svbool_t pg, float64_t *base, svfloat64x2_t data) : "ST2D {Zdata0.D, Zdata1.D}, Pg, [Xarray, Xindex, LSL #3]" or "ST2D {Zdata0.D, Zdata1.D}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, double* address, (Vector Value1, Vector Value2) data); + + /// void svst2[_s8](svbool_t pg, int8_t *base, svint8x2_t data) : "ST2B {Zdata0.B, Zdata1.B}, Pg, [Xarray, Xindex]" or "ST2B {Zdata0.B, Zdata1.B}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, sbyte* address, (Vector Value1, Vector Value2) data); + + /// void svst2[_s16](svbool_t pg, int16_t *base, svint16x2_t data) : "ST2H {Zdata0.H, Zdata1.H}, Pg, [Xarray, Xindex, LSL #1]" or "ST2H {Zdata0.H, Zdata1.H}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, short* address, (Vector Value1, Vector Value2) data); + + /// void svst2[_s32](svbool_t pg, int32_t *base, svint32x2_t data) : "ST2W {Zdata0.S, Zdata1.S}, Pg, [Xarray, Xindex, LSL #2]" or "ST2W {Zdata0.S, Zdata1.S}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, int* address, (Vector Value1, Vector Value2) data); + + /// void svst2[_s64](svbool_t pg, int64_t *base, svint64x2_t data) : "ST2D {Zdata0.D, Zdata1.D}, Pg, [Xarray, Xindex, LSL #3]" or "ST2D {Zdata0.D, Zdata1.D}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, long* address, (Vector Value1, Vector Value2) data); + + /// void svst2[_u8](svbool_t pg, uint8_t *base, svuint8x2_t data) : "ST2B {Zdata0.B, Zdata1.B}, Pg, [Xarray, Xindex]" or "ST2B {Zdata0.B, Zdata1.B}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, byte* address, (Vector Value1, Vector Value2) data); + + /// void svst2[_u16](svbool_t pg, uint16_t *base, svuint16x2_t data) : "ST2H {Zdata0.H, Zdata1.H}, Pg, [Xarray, Xindex, LSL #1]" or "ST2H {Zdata0.H, Zdata1.H}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, ushort* address, (Vector Value1, Vector Value2) data); + + /// void svst2[_u32](svbool_t pg, uint32_t *base, svuint32x2_t data) : "ST2W {Zdata0.S, Zdata1.S}, Pg, [Xarray, Xindex, LSL #2]" or "ST2W {Zdata0.S, Zdata1.S}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, uint* address, (Vector Value1, Vector Value2) data); + + /// void svst2[_u64](svbool_t pg, uint64_t *base, svuint64x2_t data) : "ST2D {Zdata0.D, Zdata1.D}, Pg, [Xarray, Xindex, LSL #3]" or "ST2D {Zdata0.D, Zdata1.D}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, ulong* address, (Vector Value1, Vector Value2) data); + + /// void svst3[_f32](svbool_t pg, float32_t *base, svfloat32x3_t data) : "ST3W {Zdata0.S - Zdata2.S}, Pg, [Xarray, Xindex, LSL #2]" or "ST3W {Zdata0.S - Zdata2.S}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, float* address, (Vector Value1, Vector Value2, Vector Value3) data); + + /// void 
svst3[_f64](svbool_t pg, float64_t *base, svfloat64x3_t data) : "ST3D {Zdata0.D - Zdata2.D}, Pg, [Xarray, Xindex, LSL #3]" or "ST3D {Zdata0.D - Zdata2.D}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, double* address, (Vector Value1, Vector Value2, Vector Value3) data); + + /// void svst3[_s8](svbool_t pg, int8_t *base, svint8x3_t data) : "ST3B {Zdata0.B - Zdata2.B}, Pg, [Xarray, Xindex]" or "ST3B {Zdata0.B - Zdata2.B}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, sbyte* address, (Vector Value1, Vector Value2, Vector Value3) data); + + /// void svst3[_s16](svbool_t pg, int16_t *base, svint16x3_t data) : "ST3H {Zdata0.H - Zdata2.H}, Pg, [Xarray, Xindex, LSL #1]" or "ST3H {Zdata0.H - Zdata2.H}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, short* address, (Vector Value1, Vector Value2, Vector Value3) data); + + /// void svst3[_s32](svbool_t pg, int32_t *base, svint32x3_t data) : "ST3W {Zdata0.S - Zdata2.S}, Pg, [Xarray, Xindex, LSL #2]" or "ST3W {Zdata0.S - Zdata2.S}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, int* address, (Vector Value1, Vector Value2, Vector Value3) data); + + /// void svst3[_s64](svbool_t pg, int64_t *base, svint64x3_t data) : "ST3D {Zdata0.D - Zdata2.D}, Pg, [Xarray, Xindex, LSL #3]" or "ST3D {Zdata0.D - Zdata2.D}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, long* address, (Vector Value1, Vector Value2, Vector Value3) data); + + /// void svst3[_u8](svbool_t pg, uint8_t *base, svuint8x3_t data) : "ST3B {Zdata0.B - Zdata2.B}, Pg, [Xarray, Xindex]" or "ST3B {Zdata0.B - Zdata2.B}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, byte* address, (Vector Value1, Vector Value2, Vector Value3) data); + + /// void svst3[_u16](svbool_t pg, uint16_t *base, svuint16x3_t data) : "ST3H {Zdata0.H - Zdata2.H}, Pg, [Xarray, Xindex, LSL #1]" or "ST3H {Zdata0.H - Zdata2.H}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, ushort* address, (Vector Value1, Vector Value2, Vector Value3) data); + + /// void svst3[_u32](svbool_t pg, uint32_t *base, svuint32x3_t data) : "ST3W {Zdata0.S - Zdata2.S}, Pg, [Xarray, Xindex, LSL #2]" or "ST3W {Zdata0.S - Zdata2.S}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, uint* address, (Vector Value1, Vector Value2, Vector Value3) data); + + /// void svst3[_u64](svbool_t pg, uint64_t *base, svuint64x3_t data) : "ST3D {Zdata0.D - Zdata2.D}, Pg, [Xarray, Xindex, LSL #3]" or "ST3D {Zdata0.D - Zdata2.D}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, ulong* address, (Vector Value1, Vector Value2, Vector Value3) data); + + /// void svst4[_f32](svbool_t pg, float32_t *base, svfloat32x4_t data) : "ST4W {Zdata0.S - Zdata3.S}, Pg, [Xarray, Xindex, LSL #2]" or "ST4W {Zdata0.S - Zdata3.S}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, float* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); + + /// void svst4[_f64](svbool_t pg, float64_t *base, svfloat64x4_t data) : "ST4D {Zdata0.D - Zdata3.D}, Pg, [Xarray, Xindex, LSL #3]" or "ST4D {Zdata0.D - Zdata3.D}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, double* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); + + /// void svst4[_s8](svbool_t pg, int8_t *base, svint8x4_t data) : "ST4B {Zdata0.B - Zdata3.B}, Pg, [Xarray, Xindex]" or "ST4B {Zdata0.B - Zdata3.B}, Pg, [Xbase, #0, MUL VL]" + public 
static unsafe void Store(Vector mask, sbyte* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); + + /// void svst4[_s16](svbool_t pg, int16_t *base, svint16x4_t data) : "ST4H {Zdata0.H - Zdata3.H}, Pg, [Xarray, Xindex, LSL #1]" or "ST4H {Zdata0.H - Zdata3.H}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, short* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); + + /// void svst4[_s32](svbool_t pg, int32_t *base, svint32x4_t data) : "ST4W {Zdata0.S - Zdata3.S}, Pg, [Xarray, Xindex, LSL #2]" or "ST4W {Zdata0.S - Zdata3.S}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, int* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); + + /// void svst4[_s64](svbool_t pg, int64_t *base, svint64x4_t data) : "ST4D {Zdata0.D - Zdata3.D}, Pg, [Xarray, Xindex, LSL #3]" or "ST4D {Zdata0.D - Zdata3.D}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, long* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); + + /// void svst4[_u8](svbool_t pg, uint8_t *base, svuint8x4_t data) : "ST4B {Zdata0.B - Zdata3.B}, Pg, [Xarray, Xindex]" or "ST4B {Zdata0.B - Zdata3.B}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, byte* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); + + /// void svst4[_u16](svbool_t pg, uint16_t *base, svuint16x4_t data) : "ST4H {Zdata0.H - Zdata3.H}, Pg, [Xarray, Xindex, LSL #1]" or "ST4H {Zdata0.H - Zdata3.H}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, ushort* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); + + /// void svst4[_u32](svbool_t pg, uint32_t *base, svuint32x4_t data) : "ST4W {Zdata0.S - Zdata3.S}, Pg, [Xarray, Xindex, LSL #2]" or "ST4W {Zdata0.S - Zdata3.S}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, uint* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); + + /// void svst4[_u64](svbool_t pg, uint64_t *base, svuint64x4_t data) : "ST4D {Zdata0.D - Zdata3.D}, Pg, [Xarray, Xindex, LSL #3]" or "ST4D {Zdata0.D - Zdata3.D}, Pg, [Xbase, #0, MUL VL]" + public static unsafe void Store(Vector mask, ulong* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); + + + 
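The tuple overloads above are interleaving structure stores: the two-vector form writes address[2*i] = Value1[i] and address[2*i + 1] = Value2[i] for each active lane, and the three- and four-vector forms widen the stride accordingly. An editorial sketch under the same assumptions as the scatter examples (CreateTrueMaskSingle is an assumed helper):

    static unsafe void StoreInterleavedPair(float* address, Vector<float> re, Vector<float> im)
    {
        if (Sve.IsSupported)
        {
            Vector<float> mask = Sve.CreateTrueMaskSingle();   // assumed helper
            // Interleaved store: address[2*i] = re[i], address[2*i + 1] = im[i]
            // ("ST2W {Zdata0.S, Zdata1.S}, Pg, [Xbase, #0, MUL VL]").
            Sve.Store(mask, address, (re, im));
        }
    }
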
/// StoreNarrowing : Truncate to 8, 16 or 32 bits and store + + /// void svst1b[_s16](svbool_t pg, int8_t *base, svint16_t data) : "ST1B Zdata.H, Pg, [Xarray, Xindex]" or "ST1B Zdata.H, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNarrowing(Vector mask, sbyte* address, Vector data); + + /// void svst1b[_s32](svbool_t pg, int8_t *base, svint32_t data) : "ST1B Zdata.S, Pg, [Xarray, Xindex]" or "ST1B Zdata.S, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNarrowing(Vector mask, sbyte* address, Vector data); + + /// void svst1b[_s64](svbool_t pg, int8_t *base, svint64_t data) : "ST1B Zdata.D, Pg, [Xarray, Xindex]" or "ST1B Zdata.D, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNarrowing(Vector mask, sbyte* address, Vector data); + + /// void svst1b[_u16](svbool_t pg, uint8_t *base, svuint16_t data) : "ST1B Zdata.H, Pg, [Xarray, Xindex]" or "ST1B Zdata.H, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNarrowing(Vector mask, byte* address, Vector data); + + /// void svst1b[_u32](svbool_t pg, uint8_t *base, svuint32_t data) : "ST1B Zdata.S, Pg, [Xarray, Xindex]" or "ST1B Zdata.S, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNarrowing(Vector mask, byte* address, Vector data); + + /// void svst1b[_u64](svbool_t pg, uint8_t *base, svuint64_t data) : "ST1B Zdata.D, Pg, [Xarray, Xindex]" or "ST1B Zdata.D, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNarrowing(Vector mask, byte* address, Vector data); + + /// void svst1h[_s32](svbool_t pg, int16_t *base, svint32_t data) : "ST1H Zdata.S, Pg, [Xarray, Xindex, LSL #1]" or "ST1H Zdata.S, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNarrowing(Vector mask, short* address, Vector data); + + /// void svst1h[_s64](svbool_t pg, int16_t *base, svint64_t data) : "ST1H Zdata.D, Pg, [Xarray, Xindex, LSL #1]" or "ST1H Zdata.D, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNarrowing(Vector mask, short* address, Vector data); + + /// void svst1h[_u32](svbool_t pg, uint16_t *base, svuint32_t data) : "ST1H Zdata.S, Pg, [Xarray, Xindex, LSL #1]" or "ST1H Zdata.S, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNarrowing(Vector mask, ushort* address, Vector data); + + /// void svst1h[_u64](svbool_t pg, uint16_t *base, svuint64_t data) : "ST1H Zdata.D, Pg, [Xarray, Xindex, LSL #1]" or "ST1H Zdata.D, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNarrowing(Vector mask, ushort* address, Vector data); + + /// void svst1w[_s64](svbool_t pg, int32_t *base, svint64_t data) : "ST1W Zdata.D, Pg, [Xarray, Xindex, LSL #2]" or "ST1W Zdata.D, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNarrowing(Vector mask, int* address, Vector data); + + /// void svst1w[_u64](svbool_t pg, uint32_t *base, svuint64_t data) : "ST1W Zdata.D, Pg, [Xarray, Xindex, LSL #2]" or "ST1W Zdata.D, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNarrowing(Vector mask, uint* address, Vector data); + + + 
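StoreNarrowing packs wide lanes into a contiguous run of narrower elements: each active lane is truncated to the destination width, so a Vector<long> lands in half the memory footprint of a plain Store. An editorial sketch under the same assumptions (CreateTrueMaskInt64 is an assumed helper):

    static unsafe void StoreNarrowToInt32(int* address, Vector<long> data)
    {
        if (Sve.IsSupported)
        {
            Vector<long> mask = Sve.CreateTrueMaskInt64();   // assumed helper
            // Per active lane i: address[i] = (int)data[i]
            // ("ST1W Zdata.D, Pg, [Xbase, #0, MUL VL]").
            Sve.StoreNarrowing(mask, address, data);
        }
    }
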
/// StoreNonTemporal : Non-truncating store, non-temporal + + /// void svstnt1[_f32](svbool_t pg, float32_t *base, svfloat32_t data) : "STNT1W Zdata.S, Pg, [Xarray, Xindex, LSL #2]" or "STNT1W Zdata.S, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNonTemporal(Vector mask, float* address, Vector data); + + /// void svstnt1[_f64](svbool_t pg, float64_t *base, svfloat64_t data) : "STNT1D Zdata.D, Pg, [Xarray, Xindex, LSL #3]" or "STNT1D Zdata.D, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNonTemporal(Vector mask, double* address, Vector data); + + /// void svstnt1[_s8](svbool_t pg, int8_t *base, svint8_t data) : "STNT1B Zdata.B, Pg, [Xarray, Xindex]" or "STNT1B Zdata.B, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNonTemporal(Vector mask, sbyte* address, Vector data); + + /// void svstnt1[_s16](svbool_t pg, int16_t *base, svint16_t data) : "STNT1H Zdata.H, Pg, [Xarray, Xindex, LSL #1]" or "STNT1H Zdata.H, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNonTemporal(Vector mask, short* address, Vector data); + + /// void svstnt1[_s32](svbool_t pg, int32_t *base, svint32_t data) : "STNT1W Zdata.S, Pg, [Xarray, Xindex, LSL #2]" or "STNT1W Zdata.S, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNonTemporal(Vector mask, int* address, Vector data); + + /// void svstnt1[_s64](svbool_t pg, int64_t *base, svint64_t data) : "STNT1D Zdata.D, Pg, [Xarray, Xindex, LSL #3]" or "STNT1D Zdata.D, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNonTemporal(Vector mask, long* address, Vector data); + + /// void svstnt1[_u8](svbool_t pg, uint8_t *base, svuint8_t data) : "STNT1B Zdata.B, Pg, [Xarray, Xindex]" or "STNT1B Zdata.B, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNonTemporal(Vector mask, byte* address, Vector data); + + /// void svstnt1[_u16](svbool_t pg, uint16_t *base, svuint16_t data) : "STNT1H Zdata.H, Pg, [Xarray, Xindex, LSL #1]" or "STNT1H Zdata.H, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNonTemporal(Vector mask, ushort* address, Vector data); + + /// void svstnt1[_u32](svbool_t pg, uint32_t *base, svuint32_t data) : "STNT1W Zdata.S, Pg, [Xarray, Xindex, LSL #2]" or "STNT1W Zdata.S, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNonTemporal(Vector mask, uint* address, Vector data); + + /// void svstnt1[_u64](svbool_t pg, uint64_t *base, svuint64_t data) : "STNT1D Zdata.D, Pg, [Xarray, Xindex, LSL #3]" or "STNT1D Zdata.D, Pg, [Xbase, #0, MUL VL]" + public static unsafe void StoreNonTemporal(Vector mask, ulong* address, Vector data); + + + /// total method signatures: 62 + /// total method names: 3 +} + + + /// Rejected: + /// public static unsafe void Store(Vector mask, float* base, long vnum, Vector data); // svst1_vnum[_f32] + /// public static unsafe void Store(Vector mask, double* base, long vnum, Vector data); // svst1_vnum[_f64] + /// public static unsafe void Store(Vector mask, sbyte* base, long vnum, Vector data); // svst1_vnum[_s8] + /// public static unsafe void Store(Vector mask, short* base, long vnum, Vector data); // svst1_vnum[_s16] + /// public static unsafe void Store(Vector mask, int* base, long vnum, Vector data); // svst1_vnum[_s32] + /// public static unsafe void Store(Vector mask, long* base, long vnum, Vector data); // svst1_vnum[_s64] + /// public static unsafe void Store(Vector mask, byte* base, long vnum, Vector data); // svst1_vnum[_u8] + /// public static unsafe void Store(Vector mask, ushort* base, long vnum, Vector data); // svst1_vnum[_u16] + /// public static unsafe void Store(Vector mask, uint* base, long vnum, Vector data); // svst1_vnum[_u32] + /// public static unsafe void Store(Vector mask, ulong* base, long vnum, Vector data); // svst1_vnum[_u64] + /// public static unsafe void Store(Vector mask, float* base, long vnum, (Vector data1, Vector data2)); // svst2_vnum[_f32] + /// public static unsafe void Store(Vector mask, double* base, long vnum, (Vector data1, Vector data2)); // svst2_vnum[_f64] + /// public static unsafe void Store(Vector mask, sbyte* base, long vnum, (Vector data1, Vector data2)); // svst2_vnum[_s8] + /// public static unsafe void Store(Vector mask, short* base, long vnum, (Vector data1, Vector data2)); // svst2_vnum[_s16] + /// public static unsafe void Store(Vector mask, int* base, long vnum, (Vector data1, Vector data2)); // svst2_vnum[_s32] + /// public static unsafe void Store(Vector mask, long* base, long vnum, (Vector data1, Vector data2)); // svst2_vnum[_s64] + /// public static unsafe void Store(Vector mask, byte* base, long vnum, (Vector data1, Vector data2)); // svst2_vnum[_u8] + /// public static unsafe void Store(Vector mask, ushort* base, long vnum, (Vector data1, Vector data2)); // svst2_vnum[_u16] + /// public static unsafe void Store(Vector mask, uint* base, long vnum, (Vector data1, Vector data2)); // svst2_vnum[_u32] + /// public static unsafe void Store(Vector mask, ulong* base, long vnum, (Vector data1, Vector data2)); // svst2_vnum[_u64] + /// public static unsafe void Store(Vector mask, float* base, long vnum, (Vector data1, Vector data2, Vector data3)); // svst3_vnum[_f32] + /// public static unsafe void Store(Vector mask, double* base, long vnum, (Vector data1, Vector data2, Vector data3)); // svst3_vnum[_f64] + /// public static 
unsafe void Store(Vector mask, sbyte* base, long vnum, (Vector data1, Vector data2, Vector data3)); // svst3_vnum[_s8] + /// public static unsafe void Store(Vector mask, short* base, long vnum, (Vector data1, Vector data2, Vector data3)); // svst3_vnum[_s16] + /// public static unsafe void Store(Vector mask, int* base, long vnum, (Vector data1, Vector data2, Vector data3)); // svst3_vnum[_s32] + /// public static unsafe void Store(Vector mask, long* base, long vnum, (Vector data1, Vector data2, Vector data3)); // svst3_vnum[_s64] + /// public static unsafe void Store(Vector mask, byte* base, long vnum, (Vector data1, Vector data2, Vector data3)); // svst3_vnum[_u8] + /// public static unsafe void Store(Vector mask, ushort* base, long vnum, (Vector data1, Vector data2, Vector data3)); // svst3_vnum[_u16] + /// public static unsafe void Store(Vector mask, uint* base, long vnum, (Vector data1, Vector data2, Vector data3)); // svst3_vnum[_u32] + /// public static unsafe void Store(Vector mask, ulong* base, long vnum, (Vector data1, Vector data2, Vector data3)); // svst3_vnum[_u64] + /// public static unsafe void Store(Vector mask, float* base, long vnum, (Vector data1, Vector data2, Vector data3, Vector data4)); // svst4_vnum[_f32] + /// public static unsafe void Store(Vector mask, double* base, long vnum, (Vector data1, Vector data2, Vector data3, Vector data4)); // svst4_vnum[_f64] + /// public static unsafe void Store(Vector mask, sbyte* base, long vnum, (Vector data1, Vector data2, Vector data3, Vector data4)); // svst4_vnum[_s8] + /// public static unsafe void Store(Vector mask, short* base, long vnum, (Vector data1, Vector data2, Vector data3, Vector data4)); // svst4_vnum[_s16] + /// public static unsafe void Store(Vector mask, int* base, long vnum, (Vector data1, Vector data2, Vector data3, Vector data4)); // svst4_vnum[_s32] + /// public static unsafe void Store(Vector mask, long* base, long vnum, (Vector data1, Vector data2, Vector data3, Vector data4)); // svst4_vnum[_s64] + /// public static unsafe void Store(Vector mask, byte* base, long vnum, (Vector data1, Vector data2, Vector data3, Vector data4)); // svst4_vnum[_u8] + /// public static unsafe void Store(Vector mask, ushort* base, long vnum, (Vector data1, Vector data2, Vector data3, Vector data4)); // svst4_vnum[_u16] + /// public static unsafe void Store(Vector mask, uint* base, long vnum, (Vector data1, Vector data2, Vector data3, Vector data4)); // svst4_vnum[_u32] + /// public static unsafe void Store(Vector mask, ulong* base, long vnum, (Vector data1, Vector data2, Vector data3, Vector data4)); // svst4_vnum[_u64] + /// public static unsafe void StoreNarrowing(Vector mask, sbyte* base, long vnum, Vector data); // svst1b_vnum[_s16] + /// public static unsafe void StoreNarrowing(Vector mask, sbyte* base, long vnum, Vector data); // svst1b_vnum[_s32] + /// public static unsafe void StoreNarrowing(Vector mask, sbyte* base, long vnum, Vector data); // svst1b_vnum[_s64] + /// public static unsafe void StoreNarrowing(Vector mask, byte* base, long vnum, Vector data); // svst1b_vnum[_u16] + /// public static unsafe void StoreNarrowing(Vector mask, byte* base, long vnum, Vector data); // svst1b_vnum[_u32] + /// public static unsafe void StoreNarrowing(Vector mask, byte* base, long vnum, Vector data); // svst1b_vnum[_u64] + /// public static unsafe void StoreNarrowing(Vector mask, short* base, long vnum, Vector data); // svst1h_vnum[_s32] + /// public static unsafe void StoreNarrowing(Vector mask, short* base, long vnum, Vector 
data); // svst1h_vnum[_s64]
+ /// public static unsafe void StoreNarrowing(Vector mask, ushort* base, long vnum, Vector data); // svst1h_vnum[_u32]
+ /// public static unsafe void StoreNarrowing(Vector mask, ushort* base, long vnum, Vector data); // svst1h_vnum[_u64]
+ /// public static unsafe void StoreNarrowing(Vector mask, int* base, long vnum, Vector data); // svst1w_vnum[_s64]
+ /// public static unsafe void StoreNarrowing(Vector mask, uint* base, long vnum, Vector data); // svst1w_vnum[_u64]
+ /// public static unsafe void StoreNonTemporal(Vector mask, float* base, long vnum, Vector data); // svstnt1_vnum[_f32]
+ /// public static unsafe void StoreNonTemporal(Vector mask, double* base, long vnum, Vector data); // svstnt1_vnum[_f64]
+ /// public static unsafe void StoreNonTemporal(Vector mask, sbyte* base, long vnum, Vector data); // svstnt1_vnum[_s8]
+ /// public static unsafe void StoreNonTemporal(Vector mask, short* base, long vnum, Vector data); // svstnt1_vnum[_s16]
+ /// public static unsafe void StoreNonTemporal(Vector mask, int* base, long vnum, Vector data); // svstnt1_vnum[_s32]
+ /// public static unsafe void StoreNonTemporal(Vector mask, long* base, long vnum, Vector data); // svstnt1_vnum[_s64]
+ /// public static unsafe void StoreNonTemporal(Vector mask, byte* base, long vnum, Vector data); // svstnt1_vnum[_u8]
+ /// public static unsafe void StoreNonTemporal(Vector mask, ushort* base, long vnum, Vector data); // svstnt1_vnum[_u16]
+ /// public static unsafe void StoreNonTemporal(Vector mask, uint* base, long vnum, Vector data); // svstnt1_vnum[_u32]
+ /// public static unsafe void StoreNonTemporal(Vector mask, ulong* base, long vnum, Vector data); // svstnt1_vnum[_u64]
+ /// Total Rejected: 62
+
+ /// Total ACLE covered across API: 124
+
diff --git a/sve_api/out_api/apiraw_none__.cs b/sve_api/out_api/apiraw_none__.cs
new file mode 100644
index 0000000000000..f37ee7ea48853
--- /dev/null
+++ b/sve_api/out_api/apiraw_none__.cs
@@ -0,0 +1,1080 @@
+namespace System.Runtime.Intrinsics.Arm;
+
+/// Vector<T> Summary
+public abstract partial class SveNone : AdvSimd /// Feature: none
+{
+
+ /// T: bfloat16, half, float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x);
+
+ /// T: bfloat16, half, float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x);
+
+ /// T: bfloat16, half, float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x);
+
+ /// T: bfloat16, half, float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3);
+
+ /// T: bfloat16, half, float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2);
+
+ /// T: bfloat16, half, float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1);
+
+ /// T: bfloat16, half, float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors();
+
+ /// T: bfloat16, half, float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors();
+
+ /// T: bfloat16, half, float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors();
+
+ /// T: bfloat16, half, float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector CreateAnUninitializedVector();
+
+ public static unsafe Vector DuplicateSelectedScalarToVector(sbyte x0, sbyte x1, sbyte x2, sbyte x3, sbyte x4, sbyte x5, sbyte x6, sbyte x7, sbyte x8, sbyte x9, sbyte x10, sbyte x11, sbyte x12, sbyte x13, sbyte x14, sbyte x15);
+
+ public static unsafe Vector DuplicateSelectedScalarToVector(byte x0, byte x1, byte x2, byte x3, byte x4, byte x5, byte x6, byte x7, byte x8, byte x9, byte x10, byte x11, byte x12, byte x13, byte x14, byte x15);
+
+ public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7, bool x8, bool x9, bool x10, bool x11, bool x12, bool x13, bool x14, bool x15);
+
+ /// T: bfloat16, half, short, ushort
+ public static unsafe Vector DuplicateSelectedScalarToVector(T x0, T x1, T x2, T x3, T x4, T x5, T x6, T x7);
+
+ public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7);
+
+ /// T: float, int, uint
+ public static unsafe Vector DuplicateSelectedScalarToVector(T x0, T x1, T x2, T x3);
+
+ public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, bool x1, bool x2, bool x3);
+
+ /// T: double, long, ulong
+ public static unsafe Vector DuplicateSelectedScalarToVector(T x0, T x1);
+
+ public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, bool x1);
+
+ /// T: bfloat16, half, float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index);
+
+ /// T: bfloat16, half, float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index);
+
+ /// T: bfloat16, half, float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index);
+
+ /// T: bfloat16, half, float, double, sbyte, short, int, long, byte, ushort, uint, ulong
+ public static unsafe Vector ReinterpretVectorContents(Vector value);
+
+ /// T: [bfloat16, half], [bfloat16, float], [bfloat16, double], [bfloat16, sbyte], [bfloat16, short], [bfloat16, int], [bfloat16, long], [bfloat16, byte], [bfloat16, ushort], [bfloat16, uint], [bfloat16, ulong], [half, bfloat16], [half, float], [half, double], [half, sbyte], [half, short], [half, int], [half, long], [half, byte], [half, ushort], [half, uint], [half, ulong], [float, bfloat16], [float, half], [float, double], [float, sbyte], [float, short], [float, int], [float, long], [float,
byte], [float, ushort], [float, uint], [float, ulong], [double, bfloat16], [double, half], [double, float], [double, sbyte], [double, short], [double, int], [double, long], [double, byte], [double, ushort], [double, uint], [double, ulong], [sbyte, bfloat16], [sbyte, half], [sbyte, float], [sbyte, double], [sbyte, short], [sbyte, int], [sbyte, long], [sbyte, byte], [sbyte, ushort], [sbyte, uint], [sbyte, ulong], [short, bfloat16], [short, half], [short, float], [short, double], [short, sbyte], [short, int], [short, long], [short, byte], [short, ushort], [short, uint], [short, ulong], [int, bfloat16], [int, half], [int, float], [int, double], [int, sbyte], [int, short], [int, long], [int, byte], [int, ushort], [int, uint], [int, ulong], [long, bfloat16], [long, half], [long, float], [long, double], [long, sbyte], [long, short], [long, int], [long, byte], [long, ushort], [long, uint], [long, ulong], [byte, bfloat16], [byte, half], [byte, float], [byte, double], [byte, sbyte], [byte, short], [byte, int], [byte, long], [byte, ushort], [byte, uint], [byte, ulong], [ushort, bfloat16], [ushort, half], [ushort, float], [ushort, double], [ushort, sbyte], [ushort, short], [ushort, int], [ushort, long], [ushort, byte], [ushort, uint], [ushort, ulong], [uint, bfloat16], [uint, half], [uint, float], [uint, double], [uint, sbyte], [uint, short], [uint, int], [uint, long], [uint, byte], [uint, ushort], [uint, ulong], [ulong, bfloat16], [ulong, half], [ulong, float], [ulong, double], [ulong, sbyte], [ulong, short], [ulong, int], [ulong, long], [ulong, byte], [ulong, ushort], [ulong, uint] + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// total method signatures: 24 + +} + + +/// Full API +public abstract partial class SveNone : AdvSimd /// Feature: none +{ + /// ChangeOneVectorInATupleOfFourVectors : Change one vector in a tuple of four vectors + + /// svbfloat16x4_t svset4[_bf16](svbfloat16x4_t tuple, uint64_t imm_index, svbfloat16_t x) : + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x); + + /// svfloat16x4_t svset4[_f16](svfloat16x4_t tuple, uint64_t imm_index, svfloat16_t x) : + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x); + + /// svfloat32x4_t svset4[_f32](svfloat32x4_t tuple, uint64_t imm_index, svfloat32_t x) : + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x); + + /// svfloat64x4_t svset4[_f64](svfloat64x4_t tuple, uint64_t imm_index, svfloat64_t x) : + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x); + + /// svint8x4_t svset4[_s8](svint8x4_t tuple, uint64_t imm_index, svint8_t x) : + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x); + + /// svint16x4_t svset4[_s16](svint16x4_t tuple, uint64_t imm_index, svint16_t x) : + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x); + + /// svint32x4_t 
svset4[_s32](svint32x4_t tuple, uint64_t imm_index, svint32_t x) : + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x); + + /// svint64x4_t svset4[_s64](svint64x4_t tuple, uint64_t imm_index, svint64_t x) : + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x); + + /// svuint8x4_t svset4[_u8](svuint8x4_t tuple, uint64_t imm_index, svuint8_t x) : + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x); + + /// svuint16x4_t svset4[_u16](svuint16x4_t tuple, uint64_t imm_index, svuint16_t x) : + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x); + + /// svuint32x4_t svset4[_u32](svuint32x4_t tuple, uint64_t imm_index, svuint32_t x) : + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x); + + /// svuint64x4_t svset4[_u64](svuint64x4_t tuple, uint64_t imm_index, svuint64_t x) : + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x); + + + /// ChangeOneVectorInATupleOfThreeVectors : Change one vector in a tuple of three vectors + + /// svbfloat16x3_t svset3[_bf16](svbfloat16x3_t tuple, uint64_t imm_index, svbfloat16_t x) : + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x); + + /// svfloat16x3_t svset3[_f16](svfloat16x3_t tuple, uint64_t imm_index, svfloat16_t x) : + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x); + + /// svfloat32x3_t svset3[_f32](svfloat32x3_t tuple, uint64_t imm_index, svfloat32_t x) : + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x); + + /// svfloat64x3_t svset3[_f64](svfloat64x3_t tuple, uint64_t imm_index, svfloat64_t x) : + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x); + + /// svint8x3_t svset3[_s8](svint8x3_t tuple, uint64_t imm_index, svint8_t x) : + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x); + + /// svint16x3_t svset3[_s16](svint16x3_t tuple, uint64_t imm_index, svint16_t x) : + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x); + + /// svint32x3_t svset3[_s32](svint32x3_t tuple, uint64_t imm_index, svint32_t x) : + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x); + + /// svint64x3_t svset3[_s64](svint64x3_t tuple, uint64_t imm_index, svint64_t x) : + 
public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x); + + /// svuint8x3_t svset3[_u8](svuint8x3_t tuple, uint64_t imm_index, svuint8_t x) : + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x); + + /// svuint16x3_t svset3[_u16](svuint16x3_t tuple, uint64_t imm_index, svuint16_t x) : + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x); + + /// svuint32x3_t svset3[_u32](svuint32x3_t tuple, uint64_t imm_index, svuint32_t x) : + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x); + + /// svuint64x3_t svset3[_u64](svuint64x3_t tuple, uint64_t imm_index, svuint64_t x) : + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x); + + + /// ChangeOneVectorInATupleOfTwoVectors : Change one vector in a tuple of two vectors + + /// svbfloat16x2_t svset2[_bf16](svbfloat16x2_t tuple, uint64_t imm_index, svbfloat16_t x) : + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x); + + /// svfloat16x2_t svset2[_f16](svfloat16x2_t tuple, uint64_t imm_index, svfloat16_t x) : + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x); + + /// svfloat32x2_t svset2[_f32](svfloat32x2_t tuple, uint64_t imm_index, svfloat32_t x) : + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x); + + /// svfloat64x2_t svset2[_f64](svfloat64x2_t tuple, uint64_t imm_index, svfloat64_t x) : + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x); + + /// svint8x2_t svset2[_s8](svint8x2_t tuple, uint64_t imm_index, svint8_t x) : + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x); + + /// svint16x2_t svset2[_s16](svint16x2_t tuple, uint64_t imm_index, svint16_t x) : + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x); + + /// svint32x2_t svset2[_s32](svint32x2_t tuple, uint64_t imm_index, svint32_t x) : + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x); + + /// svint64x2_t svset2[_s64](svint64x2_t tuple, uint64_t imm_index, svint64_t x) : + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x); + + /// svuint8x2_t svset2[_u8](svuint8x2_t tuple, uint64_t imm_index, svuint8_t x) : + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x); + + /// svuint16x2_t svset2[_u16](svuint16x2_t tuple, uint64_t imm_index, svuint16_t x) : + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x); + + /// svuint32x2_t svset2[_u32](svuint32x2_t 
tuple, uint64_t imm_index, svuint32_t x) : + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x); + + /// svuint64x2_t svset2[_u64](svuint64x2_t tuple, uint64_t imm_index, svuint64_t x) : + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x); + + + /// CreateATupleOfFourVectors : Create a tuple of four vectors + + /// svbfloat16x4_t svcreate4[_bf16](svbfloat16_t x0, svbfloat16_t x1, svbfloat16_t x2, svbfloat16_t x3) : + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3); + + /// svfloat16x4_t svcreate4[_f16](svfloat16_t x0, svfloat16_t x1, svfloat16_t x2, svfloat16_t x3) : + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3); + + /// svfloat32x4_t svcreate4[_f32](svfloat32_t x0, svfloat32_t x1, svfloat32_t x2, svfloat32_t x3) : + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3); + + /// svfloat64x4_t svcreate4[_f64](svfloat64_t x0, svfloat64_t x1, svfloat64_t x2, svfloat64_t x3) : + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3); + + /// svint8x4_t svcreate4[_s8](svint8_t x0, svint8_t x1, svint8_t x2, svint8_t x3) : + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3); + + /// svint16x4_t svcreate4[_s16](svint16_t x0, svint16_t x1, svint16_t x2, svint16_t x3) : + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3); + + /// svint32x4_t svcreate4[_s32](svint32_t x0, svint32_t x1, svint32_t x2, svint32_t x3) : + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3); + + /// svint64x4_t svcreate4[_s64](svint64_t x0, svint64_t x1, svint64_t x2, svint64_t x3) : + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3); + + /// svuint8x4_t svcreate4[_u8](svuint8_t x0, svuint8_t x1, svuint8_t x2, svuint8_t x3) : + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3); + + /// svuint16x4_t svcreate4[_u16](svuint16_t x0, svuint16_t x1, svuint16_t x2, svuint16_t x3) : + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3); + + /// svuint32x4_t svcreate4[_u32](svuint32_t x0, svuint32_t x1, svuint32_t x2, svuint32_t x3) : + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3); + + /// svuint64x4_t svcreate4[_u64](svuint64_t x0, svuint64_t x1, svuint64_t x2, svuint64_t x3) : + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3); + + + /// CreateATupleOfThreeVectors : Create a tuple of three vectors + + /// svbfloat16x3_t svcreate3[_bf16](svbfloat16_t x0, svbfloat16_t x1, svbfloat16_t x2) : + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2); + + /// svfloat16x3_t svcreate3[_f16](svfloat16_t x0, svfloat16_t x1, svfloat16_t x2) : + public 
static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2); + + /// svfloat32x3_t svcreate3[_f32](svfloat32_t x0, svfloat32_t x1, svfloat32_t x2) : + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2); + + /// svfloat64x3_t svcreate3[_f64](svfloat64_t x0, svfloat64_t x1, svfloat64_t x2) : + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2); + + /// svint8x3_t svcreate3[_s8](svint8_t x0, svint8_t x1, svint8_t x2) : + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2); + + /// svint16x3_t svcreate3[_s16](svint16_t x0, svint16_t x1, svint16_t x2) : + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2); + + /// svint32x3_t svcreate3[_s32](svint32_t x0, svint32_t x1, svint32_t x2) : + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2); + + /// svint64x3_t svcreate3[_s64](svint64_t x0, svint64_t x1, svint64_t x2) : + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2); + + /// svuint8x3_t svcreate3[_u8](svuint8_t x0, svuint8_t x1, svuint8_t x2) : + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2); + + /// svuint16x3_t svcreate3[_u16](svuint16_t x0, svuint16_t x1, svuint16_t x2) : + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2); + + /// svuint32x3_t svcreate3[_u32](svuint32_t x0, svuint32_t x1, svuint32_t x2) : + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2); + + /// svuint64x3_t svcreate3[_u64](svuint64_t x0, svuint64_t x1, svuint64_t x2) : + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2); + + + /// CreateATupleOfTwoVectors : Create a tuple of two vectors + + /// svbfloat16x2_t svcreate2[_bf16](svbfloat16_t x0, svbfloat16_t x1) : + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1); + + /// svfloat16x2_t svcreate2[_f16](svfloat16_t x0, svfloat16_t x1) : + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1); + + /// svfloat32x2_t svcreate2[_f32](svfloat32_t x0, svfloat32_t x1) : + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1); + + /// svfloat64x2_t svcreate2[_f64](svfloat64_t x0, svfloat64_t x1) : + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1); + + /// svint8x2_t svcreate2[_s8](svint8_t x0, svint8_t x1) : + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1); + + /// svint16x2_t svcreate2[_s16](svint16_t x0, svint16_t x1) : + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1); + + /// svint32x2_t svcreate2[_s32](svint32_t x0, svint32_t x1) : + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1); + + /// svint64x2_t svcreate2[_s64](svint64_t x0, svint64_t x1) : + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1); + + /// svuint8x2_t svcreate2[_u8](svuint8_t x0, svuint8_t x1) : + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1); + + /// svuint16x2_t svcreate2[_u16](svuint16_t x0, svuint16_t x1) 
: + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1); + + /// svuint32x2_t svcreate2[_u32](svuint32_t x0, svuint32_t x1) : + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1); + + /// svuint64x2_t svcreate2[_u64](svuint64_t x0, svuint64_t x1) : + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1); + + + /// CreateAnUninitializedTupleOfFourVectors : Create an uninitialized tuple of four vectors + + /// svbfloat16x4_t svundef4_bf16() : + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors(); + + /// svfloat16x4_t svundef4_f16() : + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors(); + + /// svfloat32x4_t svundef4_f32() : + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors(); + + /// svfloat64x4_t svundef4_f64() : + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors(); + + /// svint8x4_t svundef4_s8() : + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors(); + + /// svint16x4_t svundef4_s16() : + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors(); + + /// svint32x4_t svundef4_s32() : + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors(); + + /// svint64x4_t svundef4_s64() : + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors(); + + /// svuint8x4_t svundef4_u8() : + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors(); + + /// svuint16x4_t svundef4_u16() : + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors(); + + /// svuint32x4_t svundef4_u32() : + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors(); + + /// svuint64x4_t svundef4_u64() : + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors(); + + + /// CreateAnUninitializedTupleOfThreeVectors : Create an uninitialized tuple of three vectors + + /// svbfloat16x3_t svundef3_bf16() : + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors(); + + /// svfloat16x3_t svundef3_f16() : + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors(); + + /// svfloat32x3_t svundef3_f32() : + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors(); + + /// svfloat64x3_t svundef3_f64() : + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors(); + + /// svint8x3_t svundef3_s8() : + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors(); + + /// svint16x3_t svundef3_s16() : + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors(); + + /// svint32x3_t svundef3_s32() : + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors(); + + /// svint64x3_t svundef3_s64() : + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors(); + + /// svuint8x3_t svundef3_u8() : + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors(); + + /// svuint16x3_t svundef3_u16() : + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors(); + + /// 
svuint32x3_t svundef3_u32() :
+ public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors();
+
+ /// svuint64x3_t svundef3_u64() :
+ public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors();
+
+
+ /// CreateAnUninitializedTupleOfTwoVectors : Create an uninitialized tuple of two vectors
+
+ /// svbfloat16x2_t svundef2_bf16() :
+ public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors();
+
+ /// svfloat16x2_t svundef2_f16() :
+ public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors();
+
+ /// svfloat32x2_t svundef2_f32() :
+ public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors();
+
+ /// svfloat64x2_t svundef2_f64() :
+ public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors();
+
+ /// svint8x2_t svundef2_s8() :
+ public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors();
+
+ /// svint16x2_t svundef2_s16() :
+ public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors();
+
+ /// svint32x2_t svundef2_s32() :
+ public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors();
+
+ /// svint64x2_t svundef2_s64() :
+ public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors();
+
+ /// svuint8x2_t svundef2_u8() :
+ public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors();
+
+ /// svuint16x2_t svundef2_u16() :
+ public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors();
+
+ /// svuint32x2_t svundef2_u32() :
+ public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors();
+
+ /// svuint64x2_t svundef2_u64() :
+ public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors();
+
+
+ /// CreateAnUninitializedVector : Create an uninitialized vector
+
+ /// svbfloat16_t svundef_bf16() :
+ public static unsafe Vector CreateAnUninitializedVector();
+
+ /// svfloat16_t svundef_f16() :
+ public static unsafe Vector CreateAnUninitializedVector();
+
+ /// svfloat32_t svundef_f32() :
+ public static unsafe Vector CreateAnUninitializedVector();
+
+ /// svfloat64_t svundef_f64() :
+ public static unsafe Vector CreateAnUninitializedVector();
+
+ /// svint8_t svundef_s8() :
+ public static unsafe Vector CreateAnUninitializedVector();
+
+ /// svint16_t svundef_s16() :
+ public static unsafe Vector CreateAnUninitializedVector();
+
+ /// svint32_t svundef_s32() :
+ public static unsafe Vector CreateAnUninitializedVector();
+
+ /// svint64_t svundef_s64() :
+ public static unsafe Vector CreateAnUninitializedVector();
+
+ /// svuint8_t svundef_u8() :
+ public static unsafe Vector CreateAnUninitializedVector();
+
+ /// svuint16_t svundef_u16() :
+ public static unsafe Vector CreateAnUninitializedVector();
+
+ /// svuint32_t svundef_u32() :
+ public static unsafe Vector CreateAnUninitializedVector();
+
+ /// svuint64_t svundef_u64() :
+ public static unsafe Vector CreateAnUninitializedVector();
+
+
+ /// DuplicateSelectedScalarToVector : Broadcast a quadword of scalars
+
+ /// svint8_t svdupq[_n]_s8(int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7, int8_t x8, int8_t x9, int8_t x10, int8_t x11, int8_t x12, int8_t x13, int8_t x14, int8_t x15) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(sbyte x0, sbyte x1, sbyte x2, sbyte x3, sbyte x4, sbyte x5, sbyte x6, sbyte x7, sbyte x8, sbyte x9, sbyte x10, sbyte x11, sbyte x12, sbyte x13, sbyte x14, sbyte x15);
+
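A minimal usage sketch for the svdupq group that starts above and continues below. This is illustrative only: SveNone is the class proposed in this file, not a shipping API, and it assumes Vector<T> binds to the scalable SVE vector type. The scalars fill one 128-bit quadword, which is then repeated to fill the whole vector.

// Illustrative only: builds the quadword 0,1,...,15 and broadcasts it across
// the full SVE vector (on 256-bit hardware the 16 bytes would appear twice).
Vector<sbyte> pattern = SveNone.DuplicateSelectedScalarToVector(
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);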
+ /// svuint8_t svdupq[_n]_u8(uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7, uint8_t x8, uint8_t x9, uint8_t x10, uint8_t x11, uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(byte x0, byte x1, byte x2, byte x3, byte x4, byte x5, byte x6, byte x7, byte x8, byte x9, byte x10, byte x11, byte x12, byte x13, byte x14, byte x15);
+
+ /// svbool_t svdupq[_n]_b8(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7, bool x8, bool x9, bool x10, bool x11, bool x12, bool x13, bool x14, bool x15) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7, bool x8, bool x9, bool x10, bool x11, bool x12, bool x13, bool x14, bool x15);
+
+ /// svbfloat16_t svdupq[_n]_bf16(bfloat16_t x0, bfloat16_t x1, bfloat16_t x2, bfloat16_t x3, bfloat16_t x4, bfloat16_t x5, bfloat16_t x6, bfloat16_t x7) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(bfloat16 x0, bfloat16 x1, bfloat16 x2, bfloat16 x3, bfloat16 x4, bfloat16 x5, bfloat16 x6, bfloat16 x7);
+
+ /// svfloat16_t svdupq[_n]_f16(float16_t x0, float16_t x1, float16_t x2, float16_t x3, float16_t x4, float16_t x5, float16_t x6, float16_t x7) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(half x0, half x1, half x2, half x3, half x4, half x5, half x6, half x7);
+
+ /// svint16_t svdupq[_n]_s16(int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(short x0, short x1, short x2, short x3, short x4, short x5, short x6, short x7);
+
+ /// svuint16_t svdupq[_n]_u16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(ushort x0, ushort x1, ushort x2, ushort x3, ushort x4, ushort x5, ushort x6, ushort x7);
+
+ /// svbool_t svdupq[_n]_b16(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7);
+
+ /// svfloat32_t svdupq[_n]_f32(float32_t x0, float32_t x1, float32_t x2, float32_t x3) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(float x0, float x1, float x2, float x3);
+
+ /// svint32_t svdupq[_n]_s32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(int x0, int x1, int x2, int x3);
+
+ /// svuint32_t svdupq[_n]_u32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(uint x0, uint x1, uint x2, uint x3);
+
+ /// svbool_t svdupq[_n]_b32(bool x0, bool x1, bool x2, bool x3) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, bool x1, bool x2, bool x3);
+
+ /// svfloat64_t svdupq[_n]_f64(float64_t x0, float64_t x1) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(double x0, double x1);
+
+ /// svint64_t svdupq[_n]_s64(int64_t x0, int64_t x1) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(long x0, long x1);
+
+ /// svuint64_t svdupq[_n]_u64(uint64_t x0, uint64_t x1) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(ulong x0, ulong x1);
+
+ /// svbool_t svdupq[_n]_b64(bool x0, bool x1) :
+ public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, bool x1);
+
+
+ /// ExtractOneVectorFromATupleOfFourVectors : Extract one vector from a tuple of four vectors
+
+ /// svbfloat16_t svget4[_bf16](svbfloat16x4_t tuple, uint64_t imm_index) :
+ public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index);
+
+ /// svfloat16_t svget4[_f16](svfloat16x4_t tuple, uint64_t imm_index) :
+ public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index);
+
+ /// svfloat32_t svget4[_f32](svfloat32x4_t tuple, uint64_t imm_index) :
+ public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index);
+
+ /// svfloat64_t svget4[_f64](svfloat64x4_t tuple, uint64_t imm_index) :
+ public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index);
+
+ /// svint8_t svget4[_s8](svint8x4_t tuple, uint64_t imm_index) :
+ public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index);
+
+ /// svint16_t svget4[_s16](svint16x4_t tuple, uint64_t imm_index) :
+ public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index);
+
+ /// svint32_t svget4[_s32](svint32x4_t tuple, uint64_t imm_index) :
+ public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index);
+
+ /// svint64_t svget4[_s64](svint64x4_t tuple, uint64_t imm_index) :
+ public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index);
+
+ /// svuint8_t svget4[_u8](svuint8x4_t tuple, uint64_t imm_index) :
+ public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index);
+
+ /// svuint16_t svget4[_u16](svuint16x4_t tuple, uint64_t imm_index) :
+ public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index);
+
+ /// svuint32_t svget4[_u32](svuint32x4_t tuple, uint64_t imm_index) :
+ public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index);
+
+ /// svuint64_t svget4[_u64](svuint64x4_t tuple, uint64_t imm_index) :
+ public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index);
+
+
+ /// ExtractOneVectorFromATupleOfThreeVectors : Extract one vector from a tuple of three vectors
+
+ /// svbfloat16_t svget3[_bf16](svbfloat16x3_t tuple, uint64_t imm_index) :
+ public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index);
+
+ /// svfloat16_t svget3[_f16](svfloat16x3_t tuple, uint64_t imm_index) :
+ public static unsafe Vector
ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index); + + /// svfloat32_t svget3[_f32](svfloat32x3_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index); + + /// svfloat64_t svget3[_f64](svfloat64x3_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index); + + /// svint8_t svget3[_s8](svint8x3_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index); + + /// svint16_t svget3[_s16](svint16x3_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index); + + /// svint32_t svget3[_s32](svint32x3_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index); + + /// svint64_t svget3[_s64](svint64x3_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index); + + /// svuint8_t svget3[_u8](svuint8x3_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index); + + /// svuint16_t svget3[_u16](svuint16x3_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index); + + /// svuint32_t svget3[_u32](svuint32x3_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index); + + /// svuint64_t svget3[_u64](svuint64x3_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index); + + + /// ExtractOneVectorFromATupleOfTwoVectors : Extract one vector from a tuple of two vectors + + /// svbfloat16_t svget2[_bf16](svbfloat16x2_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index); + + /// svfloat16_t svget2[_f16](svfloat16x2_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index); + + /// svfloat32_t svget2[_f32](svfloat32x2_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index); + + /// svfloat64_t svget2[_f64](svfloat64x2_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index); + + /// svint8_t svget2[_s8](svint8x2_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index); + + /// svint16_t svget2[_s16](svint16x2_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index); + + /// svint32_t svget2[_s32](svint32x2_t tuple, uint64_t imm_index) : + public static unsafe 
Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index); + + /// svint64_t svget2[_s64](svint64x2_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index); + + /// svuint8_t svget2[_u8](svuint8x2_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index); + + /// svuint16_t svget2[_u16](svuint16x2_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index); + + /// svuint32_t svget2[_u32](svuint32x2_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index); + + /// svuint64_t svget2[_u64](svuint64x2_t tuple, uint64_t imm_index) : + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index); + + + /// ReinterpretVectorContents : Reinterpret vector contents + + /// svbfloat16_t svreinterpret_bf16[_bf16](svbfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svbfloat16_t svreinterpret_bf16[_f16](svfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svbfloat16_t svreinterpret_bf16[_f32](svfloat32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svbfloat16_t svreinterpret_bf16[_f64](svfloat64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svbfloat16_t svreinterpret_bf16[_s8](svint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svbfloat16_t svreinterpret_bf16[_s16](svint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svbfloat16_t svreinterpret_bf16[_s32](svint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svbfloat16_t svreinterpret_bf16[_s64](svint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svbfloat16_t svreinterpret_bf16[_u8](svuint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svbfloat16_t svreinterpret_bf16[_u16](svuint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svbfloat16_t svreinterpret_bf16[_u32](svuint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svbfloat16_t svreinterpret_bf16[_u64](svuint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat16_t svreinterpret_f16[_bf16](svbfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat16_t svreinterpret_f16[_f16](svfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat16_t svreinterpret_f16[_f32](svfloat32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat16_t svreinterpret_f16[_f64](svfloat64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat16_t svreinterpret_f16[_s8](svint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat16_t svreinterpret_f16[_s16](svint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat16_t 
svreinterpret_f16[_s32](svint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat16_t svreinterpret_f16[_s64](svint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat16_t svreinterpret_f16[_u8](svuint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat16_t svreinterpret_f16[_u16](svuint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat16_t svreinterpret_f16[_u32](svuint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat16_t svreinterpret_f16[_u64](svuint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat32_t svreinterpret_f32[_bf16](svbfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat32_t svreinterpret_f32[_f16](svfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat32_t svreinterpret_f32[_f32](svfloat32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat32_t svreinterpret_f32[_f64](svfloat64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat32_t svreinterpret_f32[_s8](svint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat32_t svreinterpret_f32[_s16](svint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat32_t svreinterpret_f32[_s32](svint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat32_t svreinterpret_f32[_s64](svint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat32_t svreinterpret_f32[_u8](svuint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat32_t svreinterpret_f32[_u16](svuint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat32_t svreinterpret_f32[_u32](svuint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat32_t svreinterpret_f32[_u64](svuint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat64_t svreinterpret_f64[_bf16](svbfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat64_t svreinterpret_f64[_f16](svfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat64_t svreinterpret_f64[_f32](svfloat32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat64_t svreinterpret_f64[_f64](svfloat64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat64_t svreinterpret_f64[_s8](svint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat64_t svreinterpret_f64[_s16](svint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat64_t svreinterpret_f64[_s32](svint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat64_t svreinterpret_f64[_s64](svint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat64_t svreinterpret_f64[_u8](svuint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// 
svfloat64_t svreinterpret_f64[_u16](svuint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat64_t svreinterpret_f64[_u32](svuint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svfloat64_t svreinterpret_f64[_u64](svuint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint8_t svreinterpret_s8[_bf16](svbfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint8_t svreinterpret_s8[_f16](svfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint8_t svreinterpret_s8[_f32](svfloat32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint8_t svreinterpret_s8[_f64](svfloat64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint8_t svreinterpret_s8[_s8](svint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint8_t svreinterpret_s8[_s16](svint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint8_t svreinterpret_s8[_s32](svint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint8_t svreinterpret_s8[_s64](svint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint8_t svreinterpret_s8[_u8](svuint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint8_t svreinterpret_s8[_u16](svuint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint8_t svreinterpret_s8[_u32](svuint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint8_t svreinterpret_s8[_u64](svuint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint16_t svreinterpret_s16[_bf16](svbfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint16_t svreinterpret_s16[_f16](svfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint16_t svreinterpret_s16[_f32](svfloat32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint16_t svreinterpret_s16[_f64](svfloat64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint16_t svreinterpret_s16[_s8](svint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint16_t svreinterpret_s16[_s16](svint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint16_t svreinterpret_s16[_s32](svint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint16_t svreinterpret_s16[_s64](svint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint16_t svreinterpret_s16[_u8](svuint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint16_t svreinterpret_s16[_u16](svuint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint16_t svreinterpret_s16[_u32](svuint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint16_t svreinterpret_s16[_u64](svuint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint32_t svreinterpret_s32[_bf16](svbfloat16_t op) : + public 
static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint32_t svreinterpret_s32[_f16](svfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint32_t svreinterpret_s32[_f32](svfloat32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint32_t svreinterpret_s32[_f64](svfloat64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint32_t svreinterpret_s32[_s8](svint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint32_t svreinterpret_s32[_s16](svint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint32_t svreinterpret_s32[_s32](svint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint32_t svreinterpret_s32[_s64](svint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint32_t svreinterpret_s32[_u8](svuint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint32_t svreinterpret_s32[_u16](svuint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint32_t svreinterpret_s32[_u32](svuint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint32_t svreinterpret_s32[_u64](svuint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint64_t svreinterpret_s64[_bf16](svbfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint64_t svreinterpret_s64[_f16](svfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint64_t svreinterpret_s64[_f32](svfloat32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint64_t svreinterpret_s64[_f64](svfloat64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint64_t svreinterpret_s64[_s8](svint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint64_t svreinterpret_s64[_s16](svint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint64_t svreinterpret_s64[_s32](svint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint64_t svreinterpret_s64[_s64](svint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint64_t svreinterpret_s64[_u8](svuint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint64_t svreinterpret_s64[_u16](svuint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint64_t svreinterpret_s64[_u32](svuint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svint64_t svreinterpret_s64[_u64](svuint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint8_t svreinterpret_u8[_bf16](svbfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint8_t svreinterpret_u8[_f16](svfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint8_t svreinterpret_u8[_f32](svfloat32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint8_t svreinterpret_u8[_f64](svfloat64_t op) : + public static unsafe Vector 
ReinterpretVectorContents(Vector value); + + /// svuint8_t svreinterpret_u8[_s8](svint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint8_t svreinterpret_u8[_s16](svint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint8_t svreinterpret_u8[_s32](svint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint8_t svreinterpret_u8[_s64](svint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint8_t svreinterpret_u8[_u8](svuint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint8_t svreinterpret_u8[_u16](svuint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint8_t svreinterpret_u8[_u32](svuint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint8_t svreinterpret_u8[_u64](svuint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint16_t svreinterpret_u16[_bf16](svbfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint16_t svreinterpret_u16[_f16](svfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint16_t svreinterpret_u16[_f32](svfloat32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint16_t svreinterpret_u16[_f64](svfloat64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint16_t svreinterpret_u16[_s8](svint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint16_t svreinterpret_u16[_s16](svint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint16_t svreinterpret_u16[_s32](svint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint16_t svreinterpret_u16[_s64](svint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint16_t svreinterpret_u16[_u8](svuint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint16_t svreinterpret_u16[_u16](svuint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint16_t svreinterpret_u16[_u32](svuint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint16_t svreinterpret_u16[_u64](svuint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint32_t svreinterpret_u32[_bf16](svbfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint32_t svreinterpret_u32[_f16](svfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint32_t svreinterpret_u32[_f32](svfloat32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint32_t svreinterpret_u32[_f64](svfloat64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint32_t svreinterpret_u32[_s8](svint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint32_t svreinterpret_u32[_s16](svint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint32_t svreinterpret_u32[_s32](svint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + 
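An aside on the ReinterpretVectorContents group, which continues below: these overloads are pure bit-casts, so the lane bits are preserved and no numeric conversion happens. The same semantics can be observed today with the shipping fixed-width System.Numerics helpers; the snippet uses only existing API.

// Bit-cast, not a value conversion: every 1.0f lane reads back as 0x3F800000.
using System.Numerics;

Vector<float> ones = new Vector<float>(1.0f);
Vector<uint> bits = Vector.AsVectorUInt32(ones);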
+ /// svuint32_t svreinterpret_u32[_s64](svint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint32_t svreinterpret_u32[_u8](svuint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint32_t svreinterpret_u32[_u16](svuint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint32_t svreinterpret_u32[_u32](svuint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint32_t svreinterpret_u32[_u64](svuint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint64_t svreinterpret_u64[_bf16](svbfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint64_t svreinterpret_u64[_f16](svfloat16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint64_t svreinterpret_u64[_f32](svfloat32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint64_t svreinterpret_u64[_f64](svfloat64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint64_t svreinterpret_u64[_s8](svint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint64_t svreinterpret_u64[_s16](svint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint64_t svreinterpret_u64[_s32](svint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint64_t svreinterpret_u64[_s64](svint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint64_t svreinterpret_u64[_u8](svuint8_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint64_t svreinterpret_u64[_u16](svuint16_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint64_t svreinterpret_u64[_u32](svuint32_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + /// svuint64_t svreinterpret_u64[_u64](svuint64_t op) : + public static unsafe Vector ReinterpretVectorContents(Vector value); + + + /// total method signatures: 316 + /// total method names: 15 +} + + + /// Total ACLE covered across API: 316 + diff --git a/sve_api/out_cs_api/Sha3.PlatformNotSupported.cs b/sve_api/out_cs_api/Sha3.PlatformNotSupported.cs new file mode 100644 index 0000000000000..64068e9d2ff25 --- /dev/null +++ b/sve_api/out_cs_api/Sha3.PlatformNotSupported.cs @@ -0,0 +1,133 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SHA3 hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class Sha3 : AdvSimd + { + internal Sha3() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// BitwiseClearXor : Bit Clear and Exclusive OR performs a bitwise AND of the 128-bit vector in a source SIMD&FP register and the complement of the vector in another source SIMD&FP register, then performs a bitwise exclusive OR of the resulting vector and the vector in a third source SIMD&FP register, and writes the result to the destination SIMD&FP register. + + /// + /// int8x16_t vbcaxq_s8(int8x16_t a, int8x16_t b, int8x16_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw new PlatformNotSupportedException(); } + + /// + /// int16x8_t vbcaxq_s16(int16x8_t a, int16x8_t b, int16x8_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw new PlatformNotSupportedException(); } + + /// + /// int32x4_t vbcaxq_s32(int32x4_t a, int32x4_t b, int32x4_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw new PlatformNotSupportedException(); } + + /// + /// int64x2_t vbcaxq_s64(int64x2_t a, int64x2_t b, int64x2_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x16_t vbcaxq_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw new PlatformNotSupportedException(); } + + /// + /// uint16x8_t vbcaxq_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x4_t vbcaxq_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw new PlatformNotSupportedException(); } + + /// + /// uint64x2_t vbcaxq_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw new PlatformNotSupportedException(); } + + + /// BitwiseRotateLeftBy1AndXor : Rotate and Exclusive OR rotates each 64-bit element of the 128-bit vector in a source SIMD&FP register left by 1, performs a bitwise exclusive OR of the resulting 128-bit vector and the vector in another source SIMD&FP register, and writes the result to the destination SIMD&FP register. + + /// + /// uint64x2_t vrax1q_u64(uint64x2_t a, uint64x2_t b) + /// + public static unsafe Vector128 BitwiseRotateLeftBy1AndXor(Vector128 a, Vector128 b) { throw new PlatformNotSupportedException(); } + + + /// Xor : Three-way Exclusive OR performs a three-way exclusive OR of the values in the three source SIMD&FP registers, and writes the result to the destination SIMD&FP register. 
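The overloads that follow spell out these EOR3 semantics per element type: each lane of the result is the exclusive OR of the corresponding lanes of all three inputs. A hedged sketch of a call site, assuming the Sha3 class above ships as declared; the fallback arm is simply the bitwise definition of the operation and produces an identical result:

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

Vector128<ulong> a = Vector128.Create(0x0123456789ABCDEFUL);
Vector128<ulong> b = Vector128.Create(0xFEDCBA9876543210UL);
Vector128<ulong> c = Vector128.Create(0x00000000FFFFFFFFUL);

// One EOR3 instruction when supported; two plain XORs otherwise.
Vector128<ulong> r = Sha3.IsSupported ? Sha3.Xor(a, b, c) : a ^ b ^ c;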
+ + /// + /// int8x16_t veor3q_s8(int8x16_t a, int8x16_t b, int8x16_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw new PlatformNotSupportedException(); } + + /// + /// int16x8_t veor3q_s16(int16x8_t a, int16x8_t b, int16x8_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw new PlatformNotSupportedException(); } + + /// + /// int32x4_t veor3q_s32(int32x4_t a, int32x4_t b, int32x4_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw new PlatformNotSupportedException(); } + + /// + /// int64x2_t veor3q_s64(int64x2_t a, int64x2_t b, int64x2_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw new PlatformNotSupportedException(); } + + /// + /// uint8x16_t veor3q_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw new PlatformNotSupportedException(); } + + /// + /// uint16x8_t veor3q_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw new PlatformNotSupportedException(); } + + /// + /// uint32x4_t veor3q_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw new PlatformNotSupportedException(); } + + /// + /// uint64x2_t veor3q_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw new PlatformNotSupportedException(); } + + + /// XorRotateRight : Exclusive OR and Rotate performs a bitwise exclusive OR of the 128-bit vectors in the two source SIMD&FP registers, rotates each 64-bit element of the resulting 128-bit vector right by the value specified by a 6-bit immediate value, and writes the result to the destination SIMD&FP register. 
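The declaration follows below. As a reference for what XAR computes: XOR the two inputs, then rotate each 64-bit lane right by the same 6-bit constant. A hedged scalar sketch; XarReference, XarSketch, and the rotate amount 13 are illustrative names and values, not part of the API, while BitOperations.RotateRight is the existing scalar helper:

using System.Numerics;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

class XarSketch
{
    // Scalar reference implementation of the XAR semantics described above.
    static Vector128<ulong> XarReference(Vector128<ulong> left, Vector128<ulong> right, byte count)
        => Vector128.Create(
            BitOperations.RotateRight(left.GetElement(0) ^ right.GetElement(0), count),
            BitOperations.RotateRight(left.GetElement(1) ^ right.GetElement(1), count));

    static Vector128<ulong> Xar(Vector128<ulong> left, Vector128<ulong> right)
        => Sha3.IsSupported ? Sha3.XorRotateRight(left, right, 13) : XarReference(left, right, 13);
}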
+ + /// + /// uint64x2_t vxarq_u64(uint64x2_t a, uint64x2_t b, const int imm6) + /// + public static unsafe Vector128 XorRotateRight(Vector128 left, Vector128 right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + } +} + diff --git a/sve_api/out_cs_api/Sha3.System.Runtime.Intrinsics.cs b/sve_api/out_cs_api/Sha3.System.Runtime.Intrinsics.cs new file mode 100644 index 0000000000000..1c3fe8e43d2d5 --- /dev/null +++ b/sve_api/out_cs_api/Sha3.System.Runtime.Intrinsics.cs @@ -0,0 +1,19 @@ + public static Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw null; } + public static Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw null; } + public static Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw null; } + public static Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw null; } + public static Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw null; } + public static Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw null; } + public static Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw null; } + public static Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) { throw null; } + public static Vector128 BitwiseRotateLeftBy1AndXor(Vector128 a, Vector128 b) { throw null; } + public static Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw null; } + public static Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw null; } + public static Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw null; } + public static Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw null; } + public static Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw null; } + public static Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw null; } + public static Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw null; } + public static Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) { throw null; } + public static Vector128 XorRotateRight(Vector128 left, Vector128 right, [ConstantExpected] byte count) { throw null; } + diff --git a/sve_api/out_cs_api/Sha3.cs b/sve_api/out_cs_api/Sha3.cs new file mode 100644 index 0000000000000..f89c0ff0fa7d7 --- /dev/null +++ b/sve_api/out_cs_api/Sha3.cs @@ -0,0 +1,133 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SHA3 hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class Sha3 : AdvSimd + { + internal Sha3() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// BitwiseClearXor : Bit Clear and Exclusive OR performs a bitwise AND of the 128-bit vector in a source SIMD&FP register and the complement of the vector in another source SIMD&FP register, then performs a bitwise exclusive OR of the resulting vector and the vector in a third source SIMD&FP register, and writes the result to the destination SIMD&FP register. + + /// + /// int8x16_t vbcaxq_s8(int8x16_t a, int8x16_t b, int8x16_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + /// + /// int16x8_t vbcaxq_s16(int16x8_t a, int16x8_t b, int16x8_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + /// + /// int32x4_t vbcaxq_s32(int32x4_t a, int32x4_t b, int32x4_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + /// + /// int64x2_t vbcaxq_s64(int64x2_t a, int64x2_t b, int64x2_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + /// + /// uint8x16_t vbcaxq_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + /// + /// uint16x8_t vbcaxq_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + /// + /// uint32x4_t vbcaxq_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + /// + /// uint64x2_t vbcaxq_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + + /// BitwiseRotateLeftBy1AndXor : Rotate and Exclusive OR rotates each 64-bit element of the 128-bit vector in a source SIMD&FP register left by 1, performs a bitwise exclusive OR of the resulting 128-bit vector and the vector in another source SIMD&FP register, and writes the result to the destination SIMD&FP register. + + /// + /// uint64x2_t vrax1q_u64(uint64x2_t a, uint64x2_t b) + /// + public static unsafe Vector128 BitwiseRotateLeftBy1AndXor(Vector128 a, Vector128 b) => BitwiseRotateLeftBy1AndXor(a, b); + + + /// Xor : Three-way Exclusive OR performs a three-way exclusive OR of the values in the three source SIMD&FP registers, and writes the result to the destination SIMD&FP register. 
+ + /// + /// int8x16_t veor3q_s8(int8x16_t a, int8x16_t b, int8x16_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + /// + /// int16x8_t veor3q_s16(int16x8_t a, int16x8_t b, int16x8_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + /// + /// int32x4_t veor3q_s32(int32x4_t a, int32x4_t b, int32x4_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + /// + /// int64x2_t veor3q_s64(int64x2_t a, int64x2_t b, int64x2_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + /// + /// uint8x16_t veor3q_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + /// + /// uint16x8_t veor3q_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + /// + /// uint32x4_t veor3q_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + /// + /// uint64x2_t veor3q_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + + /// XorRotateRight : Exclusive OR and Rotate performs a bitwise exclusive OR of the 128-bit vectors in the two source SIMD&FP registers, rotates each 64-bit element of the resulting 128-bit vector right by the value specified by a 6-bit immediate value, and writes the result to the destination SIMD&FP register. + + /// + /// uint64x2_t vxarq_u64(uint64x2_t a, uint64x2_t b, const int imm6) + /// + public static unsafe Vector128 XorRotateRight(Vector128 left, Vector128 right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + } +} + diff --git a/sve_api/out_cs_api/Sm4.PlatformNotSupported.cs b/sve_api/out_cs_api/Sm4.PlatformNotSupported.cs new file mode 100644 index 0000000000000..c8afe122b04f7 --- /dev/null +++ b/sve_api/out_cs_api/Sm4.PlatformNotSupported.cs @@ -0,0 +1,47 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SM4 hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class Sm4 : AdvSimd + { + internal Sm4() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// Sm4EncryptionAndDecryption : SM4 Encode takes input data as a 128-bit vector from the first source SIMD&FP register, and four iterations of the round key held as the elements of the 128-bit vector in the second source SIMD&FP register. 
It encrypts the data by four rounds, in accordance with the SM4 standard, returning the 128-bit result to the destination SIMD&FP register. + + /// + /// uint32x4_t vsm4eq_u32(uint32x4_t a, uint32x4_t b) + /// + public static unsafe Vector128 Sm4EncryptionAndDecryption(Vector128 a, Vector128 b) { throw new PlatformNotSupportedException(); } + + + /// Sm4KeyUpdates : SM4 Key takes an input as a 128-bit vector from the first source SIMD&FP register and a 128-bit constant from the second SIMD&FP register. It derives four iterations of the output key, in accordance with the SM4 standard, returning the 128-bit result to the destination SIMD&FP register. + + /// + /// uint32x4_t vsm4ekeyq_u32(uint32x4_t a, uint32x4_t b) + /// + public static unsafe Vector128 Sm4KeyUpdates(Vector128 a, Vector128 b) { throw new PlatformNotSupportedException(); } + + } +} + diff --git a/sve_api/out_cs_api/Sm4.System.Runtime.Intrinsics.cs b/sve_api/out_cs_api/Sm4.System.Runtime.Intrinsics.cs new file mode 100644 index 0000000000000..f045ec6ee1ee2 --- /dev/null +++ b/sve_api/out_cs_api/Sm4.System.Runtime.Intrinsics.cs @@ -0,0 +1,3 @@ + public static Vector128 Sm4EncryptionAndDecryption(Vector128 a, Vector128 b) { throw null; } + public static Vector128 Sm4KeyUpdates(Vector128 a, Vector128 b) { throw null; } + diff --git a/sve_api/out_cs_api/Sm4.cs b/sve_api/out_cs_api/Sm4.cs new file mode 100644 index 0000000000000..2c3ef81020649 --- /dev/null +++ b/sve_api/out_cs_api/Sm4.cs @@ -0,0 +1,47 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SM4 hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class Sm4 : AdvSimd + { + internal Sm4() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// Sm4EncryptionAndDecryption : SM4 Encode takes input data as a 128-bit vector from the first source SIMD&FP register, and four iterations of the round key held as the elements of the 128-bit vector in the second source SIMD&FP register. It encrypts the data by four rounds, in accordance with the SM4 standard, returning the 128-bit result to the destination SIMD&FP register. + + /// + /// uint32x4_t vsm4eq_u32(uint32x4_t a, uint32x4_t b) + /// + public static unsafe Vector128 Sm4EncryptionAndDecryption(Vector128 a, Vector128 b) => Sm4EncryptionAndDecryption(a, b); + + + /// Sm4KeyUpdates : SM4 Key takes an input as a 128-bit vector from the first source SIMD&FP register and a 128-bit constant from the second SIMD&FP register. It derives four iterations of the output key, in accordance with the SM4 standard, returning the 128-bit result to the destination SIMD&FP register. 
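Its declaration follows. Given that description (four round-key iterations per call) and SM4's 32-round key schedule, expanding a full schedule would take eight chained calls. A hedged sketch only: the ck array (the standard SM4 CK constants, grouped four per vector), the initial FK whitening of the key, and the chaining of each result into the next call come from the SM4 standard rather than from this API, and Sm4KeySketch/ExpandKey are illustrative names.

using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

class Sm4KeySketch
{
    // Expands an (FK-whitened) 128-bit key into SM4's 32 round keys,
    // four per Sm4KeyUpdates call, feeding each result into the next call.
    static Vector128<uint>[] ExpandKey(Vector128<uint> whitenedKey, Vector128<uint>[] ck)
    {
        var schedule = new Vector128<uint>[8]; // 8 vectors x 4 lanes = 32 round keys
        Vector128<uint> state = whitenedKey;
        for (int i = 0; i < 8; i++)
        {
            state = Sm4.Sm4KeyUpdates(state, ck[i]);
            schedule[i] = state;
        }
        return schedule;
    }
}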
+ + /// + /// uint32x4_t vsm4ekeyq_u32(uint32x4_t a, uint32x4_t b) + /// + public static unsafe Vector128 Sm4KeyUpdates(Vector128 a, Vector128 b) => Sm4KeyUpdates(a, b); + + } +} + diff --git a/sve_api/out_cs_api/Sve.PlatformNotSupported.cs b/sve_api/out_cs_api/Sve.PlatformNotSupported.cs new file mode 100644 index 0000000000000..70d67b3ddc56b --- /dev/null +++ b/sve_api/out_cs_api/Sve.PlatformNotSupported.cs @@ -0,0 +1,9649 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class Sve : AdvSimd + { + internal Sve() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// Abs : Absolute value + + /// + /// svint8_t svabs[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) + /// svint8_t svabs[_s8]_x(svbool_t pg, svint8_t op) + /// svint8_t svabs[_s8]_z(svbool_t pg, svint8_t op) + /// + public static unsafe Vector Abs(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svabs[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// svint16_t svabs[_s16]_x(svbool_t pg, svint16_t op) + /// svint16_t svabs[_s16]_z(svbool_t pg, svint16_t op) + /// + public static unsafe Vector Abs(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svabs[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// svint32_t svabs[_s32]_x(svbool_t pg, svint32_t op) + /// svint32_t svabs[_s32]_z(svbool_t pg, svint32_t op) + /// + public static unsafe Vector Abs(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svabs[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// svint64_t svabs[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svabs[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector Abs(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svabs[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// svfloat32_t svabs[_f32]_x(svbool_t pg, svfloat32_t op) + /// svfloat32_t svabs[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector Abs(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svabs[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// svfloat64_t svabs[_f64]_x(svbool_t pg, svfloat64_t op) + /// svfloat64_t svabs[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector Abs(Vector value) { throw new PlatformNotSupportedException(); } + + + /// AbsoluteCompareGreaterThan : Absolute compare greater than + + /// + /// svbool_t svacgt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector AbsoluteCompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svacgt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector AbsoluteCompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// 
AbsoluteCompareGreaterThanOrEqual : Absolute compare greater than or equal to + + /// + /// svbool_t svacge[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector AbsoluteCompareGreaterThanOrEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svacge[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector AbsoluteCompareGreaterThanOrEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// AbsoluteCompareLessThan : Absolute compare less than + + /// + /// svbool_t svaclt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector AbsoluteCompareLessThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svaclt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector AbsoluteCompareLessThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// AbsoluteCompareLessThanOrEqual : Absolute compare less than or equal to + + /// + /// svbool_t svacle[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector AbsoluteCompareLessThanOrEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svacle[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector AbsoluteCompareLessThanOrEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// AbsoluteDifference : Absolute difference + + /// + /// svint8_t svabd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svabd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svabd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svabd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svabd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svabd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svabd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svabd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svabd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svabd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svabd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svabd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svabd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svabd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svabd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svabd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svabd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t 
svabd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svabd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svabd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svabd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svabd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svabd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svabd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svabd[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svabd[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svabd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svabd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svabd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svabd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// Add : Add + + /// + /// svint8_t svadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector Add(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector Add(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector Add(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector Add(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector Add(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t 
svadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector Add(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector Add(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector Add(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svadd[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svadd[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svadd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector Add(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svadd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svadd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svadd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector Add(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// AddAcross : Add reduction + + /// + /// int64_t svaddv[_s8](svbool_t pg, svint8_t op) + /// + public static unsafe Vector AddAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int64_t svaddv[_s16](svbool_t pg, svint16_t op) + /// + public static unsafe Vector AddAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int64_t svaddv[_s32](svbool_t pg, svint32_t op) + /// + public static unsafe Vector AddAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int64_t svaddv[_s64](svbool_t pg, svint64_t op) + /// + public static unsafe Vector AddAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint64_t svaddv[_u8](svbool_t pg, svuint8_t op) + /// + public static unsafe Vector AddAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint64_t svaddv[_u16](svbool_t pg, svuint16_t op) + /// + public static unsafe Vector AddAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint64_t svaddv[_u32](svbool_t pg, svuint32_t op) + /// + public static unsafe Vector AddAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint64_t svaddv[_u64](svbool_t pg, svuint64_t op) + /// + public static unsafe Vector AddAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// float32_t svaddv[_f32](svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector AddAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// float64_t svaddv[_f64](svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector AddAcross(Vector value) { throw new PlatformNotSupportedException(); } + + + /// AddRotateComplex : Complex add with rotate + + /// + /// svfloat32_t svcadd[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, uint64_t imm_rotation) + /// svfloat32_t svcadd[_f32]_x(svbool_t pg, svfloat32_t 
op1, svfloat32_t op2, uint64_t imm_rotation) + /// svfloat32_t svcadd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, uint64_t imm_rotation) + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svcadd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation) + /// svfloat64_t svcadd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation) + /// svfloat64_t svcadd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation) + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + + /// AddSaturate : Saturating add + + /// + /// svint8_t svqadd[_s8](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqadd[_s16](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqadd[_s32](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqadd[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svqadd[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqadd[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqadd[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svqadd[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// AddSequentialAcross : Add reduction (strictly-ordered) + + /// + /// float32_t svadda[_f32](svbool_t pg, float32_t initial, svfloat32_t op) + /// + public static unsafe Vector AddSequentialAcross(Vector initial, Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// float64_t svadda[_f64](svbool_t pg, float64_t initial, svfloat64_t op) + /// + public static unsafe Vector AddSequentialAcross(Vector initial, Vector value) { throw new PlatformNotSupportedException(); } + + + /// And : Bitwise AND + + /// + /// svint8_t svand[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svand[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svand[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector And(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svand[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svand[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svand[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, 
svbool_t op2) + /// + public static unsafe Vector And(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svand[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svand[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svand[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector And(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svand[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svand[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svand[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector And(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svand[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svand[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svand[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector And(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svand[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svand[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svand[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector And(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svand[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svand[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svand[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector And(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svand[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svand[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svand[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector And(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// AndAcross : Bitwise AND reduction to scalar + + /// + /// int8_t svandv[_s8](svbool_t pg, svint8_t op) + /// + public static unsafe Vector AndAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int16_t svandv[_s16](svbool_t pg, svint16_t op) + /// + public static unsafe Vector AndAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int32_t svandv[_s32](svbool_t pg, svint32_t op) + /// + public static unsafe Vector AndAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int64_t svandv[_s64](svbool_t pg, svint64_t op) + /// + public static unsafe Vector AndAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint8_t svandv[_u8](svbool_t pg, svuint8_t op) + /// + public static unsafe Vector AndAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint16_t svandv[_u16](svbool_t pg, 
svuint16_t op) + /// + public static unsafe Vector AndAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint32_t svandv[_u32](svbool_t pg, svuint32_t op) + /// + public static unsafe Vector AndAcross(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint64_t svandv[_u64](svbool_t pg, svuint64_t op) + /// + public static unsafe Vector AndAcross(Vector value) { throw new PlatformNotSupportedException(); } + + + /// AndNot : Bitwise NAND + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector AndNot(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector AndNot(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector AndNot(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector AndNot(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector AndNot(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector AndNot(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector AndNot(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector AndNot(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// BitwiseClear : Bitwise clear + + /// + /// svint8_t svbic[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svbic[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svbic[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svbic[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svbic[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svbic[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svbic[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svbic[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svbic[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svbic[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svbic[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svbic[_s64]_z(svbool_t pg, svint64_t op1, svint64_t 
op2) + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svbic[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svbic[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svbic[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svbic[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svbic[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svbic[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svbic[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svbic[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svbic[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svbic[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svbic[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svbic[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// BooleanNot : Logically invert boolean condition + + /// + /// svint8_t svcnot[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) + /// svint8_t svcnot[_s8]_x(svbool_t pg, svint8_t op) + /// svint8_t svcnot[_s8]_z(svbool_t pg, svint8_t op) + /// + public static unsafe Vector BooleanNot(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svcnot[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// svint16_t svcnot[_s16]_x(svbool_t pg, svint16_t op) + /// svint16_t svcnot[_s16]_z(svbool_t pg, svint16_t op) + /// + public static unsafe Vector BooleanNot(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svcnot[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// svint32_t svcnot[_s32]_x(svbool_t pg, svint32_t op) + /// svint32_t svcnot[_s32]_z(svbool_t pg, svint32_t op) + /// + public static unsafe Vector BooleanNot(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svcnot[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// svint64_t svcnot[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svcnot[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector BooleanNot(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svcnot[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op) + /// svuint8_t svcnot[_u8]_x(svbool_t pg, svuint8_t op) + /// svuint8_t svcnot[_u8]_z(svbool_t pg, svuint8_t op) + /// + public static unsafe Vector BooleanNot(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svcnot[_u16]_m(svuint16_t inactive, 
svbool_t pg, svuint16_t op) + /// svuint16_t svcnot[_u16]_x(svbool_t pg, svuint16_t op) + /// svuint16_t svcnot[_u16]_z(svbool_t pg, svuint16_t op) + /// + public static unsafe Vector BooleanNot(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svcnot[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// svuint32_t svcnot[_u32]_x(svbool_t pg, svuint32_t op) + /// svuint32_t svcnot[_u32]_z(svbool_t pg, svuint32_t op) + /// + public static unsafe Vector BooleanNot(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svcnot[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// svuint64_t svcnot[_u64]_x(svbool_t pg, svuint64_t op) + /// svuint64_t svcnot[_u64]_z(svbool_t pg, svuint64_t op) + /// + public static unsafe Vector BooleanNot(Vector value) { throw new PlatformNotSupportedException(); } + + + + /// Compact : Shuffle active elements of vector to the right and fill with zero + + /// + /// svint32_t svcompact[_s32](svbool_t pg, svint32_t op) + /// + public static unsafe Vector Compact(Vector mask, Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svcompact[_s64](svbool_t pg, svint64_t op) + /// + public static unsafe Vector Compact(Vector mask, Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svcompact[_u32](svbool_t pg, svuint32_t op) + /// + public static unsafe Vector Compact(Vector mask, Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svcompact[_u64](svbool_t pg, svuint64_t op) + /// + public static unsafe Vector Compact(Vector mask, Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svcompact[_f32](svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector Compact(Vector mask, Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svcompact[_f64](svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector Compact(Vector mask, Vector value) { throw new PlatformNotSupportedException(); } + + + /// CompareEqual : Compare equal to + + /// + /// svbool_t svcmpeq[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpeq_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpeq[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpeq_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpeq[_s32](svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpeq_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpeq[_s64](svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) { throw new 
PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpeq[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpeq[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpeq[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpeq[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpeq[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpeq[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// CompareGreaterThan : Compare greater than + + /// + /// svbool_t svcmpgt[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt[_s32](svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt[_s64](svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2) + /// + public static unsafe Vector 
CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpgt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// CompareGreaterThanOrEqual : Compare greater than or equal to + + /// + /// svbool_t svcmpge[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpge_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpge[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpge_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpge[_s32](svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpge_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpge[_s64](svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpge[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpge_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpge[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svcmpge_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2) + /// + public static unsafe Vector 
+
+
+ /// CompareGreaterThanOrEqual : Compare greater than or equal to
+
+ ///
+ /// svbool_t svcmpge[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ ///
+ public static unsafe Vector<sbyte> CompareGreaterThanOrEqual(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<sbyte> CompareGreaterThanOrEqual(Vector<sbyte> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ ///
+ public static unsafe Vector<short> CompareGreaterThanOrEqual(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<short> CompareGreaterThanOrEqual(Vector<short> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ ///
+ public static unsafe Vector<int> CompareGreaterThanOrEqual(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<int> CompareGreaterThanOrEqual(Vector<int> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<long> CompareGreaterThanOrEqual(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ ///
+ public static unsafe Vector<byte> CompareGreaterThanOrEqual(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2)
+ ///
+ public static unsafe Vector<byte> CompareGreaterThanOrEqual(Vector<byte> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ ///
+ public static unsafe Vector<ushort> CompareGreaterThanOrEqual(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2)
+ ///
+ public static unsafe Vector<ushort> CompareGreaterThanOrEqual(Vector<ushort> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ ///
+ public static unsafe Vector<uint> CompareGreaterThanOrEqual(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2)
+ ///
+ public static unsafe Vector<uint> CompareGreaterThanOrEqual(Vector<uint> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ ///
+ public static unsafe Vector<ulong> CompareGreaterThanOrEqual(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ ///
+ public static unsafe Vector<float> CompareGreaterThanOrEqual(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpge[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ ///
+ public static unsafe Vector<double> CompareGreaterThanOrEqual(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+
+ /// CompareLessThan : Compare less than
+
+ ///
+ /// svbool_t svcmplt[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ ///
+ public static unsafe Vector<sbyte> CompareLessThan(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<sbyte> CompareLessThan(Vector<sbyte> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ ///
+ public static unsafe Vector<short> CompareLessThan(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<short> CompareLessThan(Vector<short> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ ///
+ public static unsafe Vector<int> CompareLessThan(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<int> CompareLessThan(Vector<int> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<long> CompareLessThan(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ ///
+ public static unsafe Vector<byte> CompareLessThan(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2)
+ ///
+ public static unsafe Vector<byte> CompareLessThan(Vector<byte> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ ///
+ public static unsafe Vector<ushort> CompareLessThan(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2)
+ ///
+ public static unsafe Vector<ushort> CompareLessThan(Vector<ushort> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ ///
+ public static unsafe Vector<uint> CompareLessThan(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2)
+ ///
+ public static unsafe Vector<uint> CompareLessThan(Vector<uint> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ ///
+ public static unsafe Vector<ulong> CompareLessThan(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ ///
+ public static unsafe Vector<float> CompareLessThan(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmplt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ ///
+ public static unsafe Vector<double> CompareLessThan(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+
+ /// CompareLessThanOrEqual : Compare less than or equal to
+
+ ///
+ /// svbool_t svcmple[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ ///
+ public static unsafe Vector<sbyte> CompareLessThanOrEqual(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<sbyte> CompareLessThanOrEqual(Vector<sbyte> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ ///
+ public static unsafe Vector<short> CompareLessThanOrEqual(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<short> CompareLessThanOrEqual(Vector<short> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ ///
+ public static unsafe Vector<int> CompareLessThanOrEqual(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<int> CompareLessThanOrEqual(Vector<int> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<long> CompareLessThanOrEqual(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ ///
+ public static unsafe Vector<byte> CompareLessThanOrEqual(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2)
+ ///
+ public static unsafe Vector<byte> CompareLessThanOrEqual(Vector<byte> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ ///
+ public static unsafe Vector<ushort> CompareLessThanOrEqual(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2)
+ ///
+ public static unsafe Vector<ushort> CompareLessThanOrEqual(Vector<ushort> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ ///
+ public static unsafe Vector<uint> CompareLessThanOrEqual(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2)
+ ///
+ public static unsafe Vector<uint> CompareLessThanOrEqual(Vector<uint> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ ///
+ public static unsafe Vector<ulong> CompareLessThanOrEqual(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ ///
+ public static unsafe Vector<float> CompareLessThanOrEqual(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmple[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ ///
+ public static unsafe Vector<double> CompareLessThanOrEqual(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+
+ /// CompareNotEqualTo : Compare not equal to
+
+ ///
+ /// svbool_t svcmpne[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ ///
+ public static unsafe Vector<sbyte> CompareNotEqualTo(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpne_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<sbyte> CompareNotEqualTo(Vector<sbyte> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpne[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ ///
+ public static unsafe Vector<short> CompareNotEqualTo(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpne_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<short> CompareNotEqualTo(Vector<short> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpne[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ ///
+ public static unsafe Vector<int> CompareNotEqualTo(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpne_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<int> CompareNotEqualTo(Vector<int> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpne[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ ///
+ public static unsafe Vector<long> CompareNotEqualTo(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpne[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ ///
+ public static unsafe Vector<byte> CompareNotEqualTo(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpne[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ ///
+ public static unsafe Vector<ushort> CompareNotEqualTo(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpne[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ ///
+ public static unsafe Vector<uint> CompareNotEqualTo(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpne[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ ///
+ public static unsafe Vector<ulong> CompareNotEqualTo(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpne[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ ///
+ public static unsafe Vector<float> CompareNotEqualTo(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpne[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ ///
+ public static unsafe Vector<double> CompareNotEqualTo(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+
+ /// CompareUnordered : Compare unordered with
+
+ ///
+ /// svbool_t svcmpuo[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ ///
+ public static unsafe Vector<float> CompareUnordered(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svcmpuo[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ ///
+ public static unsafe Vector<double> CompareUnordered(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
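CompareUnordered is true where at least one input is NaN; comparing a vector with itself is the usual NaN test, since only NaN is unordered with itself. A sketch under the same assumptions as earlier:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static class UnorderedSketch
    {
        // Replace the NaN lanes of 'x' with the matching lanes of 'replacement'.
        static Vector<double> ReplaceNaNs(Vector<double> x, Vector<double> replacement)
        {
            Vector<double> isNaN = Sve.CompareUnordered(x, x);
            return Sve.ConditionalSelect(isNaN, replacement, x);
        }
    }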
+
+
+ /// Compute16BitAddresses : Compute vector addresses for 16-bit data
+
+ ///
+ /// svuint32_t svadrh[_u32base]_[s32]index(svuint32_t bases, svint32_t indices)
+ ///
+ public static unsafe Vector<uint> Compute16BitAddresses(Vector<uint> bases, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint32_t svadrh[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices)
+ ///
+ public static unsafe Vector<uint> Compute16BitAddresses(Vector<uint> bases, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svadrh[_u64base]_[s64]index(svuint64_t bases, svint64_t indices)
+ ///
+ public static unsafe Vector<ulong> Compute16BitAddresses(Vector<ulong> bases, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svadrh[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices)
+ ///
+ public static unsafe Vector<ulong> Compute16BitAddresses(Vector<ulong> bases, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+
+ /// Compute32BitAddresses : Compute vector addresses for 32-bit data
+
+ ///
+ /// svuint32_t svadrw[_u32base]_[s32]index(svuint32_t bases, svint32_t indices)
+ ///
+ public static unsafe Vector<uint> Compute32BitAddresses(Vector<uint> bases, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint32_t svadrw[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices)
+ ///
+ public static unsafe Vector<uint> Compute32BitAddresses(Vector<uint> bases, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svadrw[_u64base]_[s64]index(svuint64_t bases, svint64_t indices)
+ ///
+ public static unsafe Vector<ulong> Compute32BitAddresses(Vector<ulong> bases, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svadrw[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices)
+ ///
+ public static unsafe Vector<ulong> Compute32BitAddresses(Vector<ulong> bases, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+
+ /// Compute64BitAddresses : Compute vector addresses for 64-bit data
+
+ ///
+ /// svuint32_t svadrd[_u32base]_[s32]index(svuint32_t bases, svint32_t indices)
+ ///
+ public static unsafe Vector<uint> Compute64BitAddresses(Vector<uint> bases, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint32_t svadrd[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices)
+ ///
+ public static unsafe Vector<uint> Compute64BitAddresses(Vector<uint> bases, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svadrd[_u64base]_[s64]index(svuint64_t bases, svint64_t indices)
+ ///
+ public static unsafe Vector<ulong> Compute64BitAddresses(Vector<ulong> bases, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svadrd[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices)
+ ///
+ public static unsafe Vector<ulong> Compute64BitAddresses(Vector<ulong> bases, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+
+ /// Compute8BitAddresses : Compute vector addresses for 8-bit data
+
+ ///
+ /// svuint32_t svadrb[_u32base]_[s32]offset(svuint32_t bases, svint32_t offsets)
+ ///
+ public static unsafe Vector<uint> Compute8BitAddresses(Vector<uint> bases, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint32_t svadrb[_u32base]_[u32]offset(svuint32_t bases, svuint32_t offsets)
+ ///
+ public static unsafe Vector<uint> Compute8BitAddresses(Vector<uint> bases, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svadrb[_u64base]_[s64]offset(svuint64_t bases, svint64_t offsets)
+ ///
+ public static unsafe Vector<ulong> Compute8BitAddresses(Vector<ulong> bases, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svadrb[_u64base]_[u64]offset(svuint64_t bases, svuint64_t offsets)
+ ///
+ public static unsafe Vector<ulong> Compute8BitAddresses(Vector<ulong> bases, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
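The svadr* family scales each per-lane index by the element size (1, 2, 4 or 8 bytes for the 8-, 16-, 32- and 64-bit variants) and adds it to the per-lane base, producing byte addresses that typically feed a later gather. A sketch of the equivalent arithmetic:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static class AddressSketch
    {
        // Computes bases[i] + (indices[i] << 1): the byte addresses of 16-bit elements.
        static Vector<ulong> HalfwordAddresses(Vector<ulong> bases, Vector<long> indices)
            => Sve.Compute16BitAddresses(bases, indices);
    }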
+
+
+ /// ConditionalExtractAfterLastActiveElement : Conditionally extract element after last
+
+ ///
+ /// svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+ ///
+ public static unsafe Vector<sbyte> ConditionalExtractAfterLastActiveElement(Vector<sbyte> mask, Vector<sbyte> defaultValue, Vector<sbyte> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+ /// int8_t svclasta[_n_s8](svbool_t pg, int8_t fallback, svint8_t data)
+ ///
+ public static unsafe sbyte ConditionalExtractAfterLastActiveElement(Vector<sbyte> mask, sbyte defaultValues, Vector<sbyte> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+ ///
+ public static unsafe Vector<short> ConditionalExtractAfterLastActiveElement(Vector<short> mask, Vector<short> defaultValue, Vector<short> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+ /// int16_t svclasta[_n_s16](svbool_t pg, int16_t fallback, svint16_t data)
+ ///
+ public static unsafe short ConditionalExtractAfterLastActiveElement(Vector<short> mask, short defaultValues, Vector<short> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+ ///
+ public static unsafe Vector<int> ConditionalExtractAfterLastActiveElement(Vector<int> mask, Vector<int> defaultValue, Vector<int> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+ /// int32_t svclasta[_n_s32](svbool_t pg, int32_t fallback, svint32_t data)
+ ///
+ public static unsafe int ConditionalExtractAfterLastActiveElement(Vector<int> mask, int defaultValues, Vector<int> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svclasta[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+ ///
+ public static unsafe Vector<long> ConditionalExtractAfterLastActiveElement(Vector<long> mask, Vector<long> defaultValue, Vector<long> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svclasta[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+ /// int64_t svclasta[_n_s64](svbool_t pg, int64_t fallback, svint64_t data)
+ ///
+ public static unsafe long ConditionalExtractAfterLastActiveElement(Vector<long> mask, long defaultValues, Vector<long> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+ ///
+ public static unsafe Vector<byte> ConditionalExtractAfterLastActiveElement(Vector<byte> mask, Vector<byte> defaultValue, Vector<byte> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+ /// uint8_t svclasta[_n_u8](svbool_t pg, uint8_t fallback, svuint8_t data)
+ ///
+ public static unsafe byte ConditionalExtractAfterLastActiveElement(Vector<byte> mask, byte defaultValues, Vector<byte> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint16_t svclasta[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+ ///
+ public static unsafe Vector<ushort> ConditionalExtractAfterLastActiveElement(Vector<ushort> mask, Vector<ushort> defaultValue, Vector<ushort> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint16_t svclasta[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+ /// uint16_t svclasta[_n_u16](svbool_t pg, uint16_t fallback, svuint16_t data)
+ ///
+ public static unsafe ushort ConditionalExtractAfterLastActiveElement(Vector<ushort> mask, ushort defaultValues, Vector<ushort> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint32_t svclasta[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+ ///
+ public static unsafe Vector<uint> ConditionalExtractAfterLastActiveElement(Vector<uint> mask, Vector<uint> defaultValue, Vector<uint> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint32_t svclasta[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+ /// uint32_t svclasta[_n_u32](svbool_t pg, uint32_t fallback, svuint32_t data)
+ ///
+ public static unsafe uint ConditionalExtractAfterLastActiveElement(Vector<uint> mask, uint defaultValues, Vector<uint> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svclasta[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+ ///
+ public static unsafe Vector<ulong> ConditionalExtractAfterLastActiveElement(Vector<ulong> mask, Vector<ulong> defaultValue, Vector<ulong> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svclasta[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+ /// uint64_t svclasta[_n_u64](svbool_t pg, uint64_t fallback, svuint64_t data)
+ ///
+ public static unsafe ulong ConditionalExtractAfterLastActiveElement(Vector<ulong> mask, ulong defaultValues, Vector<ulong> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat32_t svclasta[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+ ///
+ public static unsafe Vector<float> ConditionalExtractAfterLastActiveElement(Vector<float> mask, Vector<float> defaultValue, Vector<float> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat32_t svclasta[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+ /// float32_t svclasta[_n_f32](svbool_t pg, float32_t fallback, svfloat32_t data)
+ ///
+ public static unsafe float ConditionalExtractAfterLastActiveElement(Vector<float> mask, float defaultValues, Vector<float> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat64_t svclasta[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+ ///
+ public static unsafe Vector<double> ConditionalExtractAfterLastActiveElement(Vector<double> mask, Vector<double> defaultValue, Vector<double> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat64_t svclasta[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+ /// float64_t svclasta[_n_f64](svbool_t pg, float64_t fallback, svfloat64_t data)
+ ///
+ public static unsafe double ConditionalExtractAfterLastActiveElement(Vector<double> mask, double defaultValues, Vector<double> data) { throw new PlatformNotSupportedException(); }
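svclasta selects the element that follows the last active lane (wrapping to lane 0 past the end) and returns the supplied default when no lane is active; the scalar overloads surface the `_n_` forms. Sketch:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static class ClastaSketch
    {
        // Returns the element following the last active lane of 'data',
        // or -1 when 'mask' has no active lanes.
        static int AfterLastActiveOrMinusOne(Vector<int> mask, Vector<int> data)
            => Sve.ConditionalExtractAfterLastActiveElement(mask, -1, data);
    }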
+
+
+ /// ConditionalExtractAfterLastActiveElementAndReplicate : Conditionally extract element after last
+
+ ///
+ /// svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+ ///
+ public static unsafe Vector<sbyte> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<sbyte> mask, Vector<sbyte> defaultScalar, Vector<sbyte> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+ ///
+ public static unsafe Vector<short> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<short> mask, Vector<short> defaultScalar, Vector<short> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+ ///
+ public static unsafe Vector<int> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<int> mask, Vector<int> defaultScalar, Vector<int> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svclasta[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+ ///
+ public static unsafe Vector<long> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<long> mask, Vector<long> defaultScalar, Vector<long> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+ ///
+ public static unsafe Vector<byte> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<byte> mask, Vector<byte> defaultScalar, Vector<byte> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint16_t svclasta[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+ ///
+ public static unsafe Vector<ushort> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<ushort> mask, Vector<ushort> defaultScalar, Vector<ushort> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint32_t svclasta[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+ ///
+ public static unsafe Vector<uint> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<uint> mask, Vector<uint> defaultScalar, Vector<uint> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svclasta[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+ ///
+ public static unsafe Vector<ulong> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<ulong> mask, Vector<ulong> defaultScalar, Vector<ulong> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat32_t svclasta[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+ ///
+ public static unsafe Vector<float> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<float> mask, Vector<float> defaultScalar, Vector<float> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat64_t svclasta[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+ ///
+ public static unsafe Vector<double> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<double> mask, Vector<double> defaultScalar, Vector<double> data) { throw new PlatformNotSupportedException(); }
+
+
+ /// ConditionalExtractLastActiveElement : Conditionally extract last element
+
+ ///
+ /// svint8_t svclastb[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+ ///
+ public static unsafe Vector<sbyte> ConditionalExtractLastActiveElement(Vector<sbyte> mask, Vector<sbyte> defaultValue, Vector<sbyte> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint8_t svclastb[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+ /// int8_t svclastb[_n_s8](svbool_t pg, int8_t fallback, svint8_t data)
+ ///
+ public static unsafe sbyte ConditionalExtractLastActiveElement(Vector<sbyte> mask, sbyte defaultValues, Vector<sbyte> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint16_t svclastb[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+ ///
+ public static unsafe Vector<short> ConditionalExtractLastActiveElement(Vector<short> mask, Vector<short> defaultValue, Vector<short> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint16_t svclastb[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+ /// int16_t svclastb[_n_s16](svbool_t pg, int16_t fallback, svint16_t data)
+ ///
+ public static unsafe short ConditionalExtractLastActiveElement(Vector<short> mask, short defaultValues, Vector<short> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint32_t svclastb[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+ ///
+ public static unsafe Vector<int> ConditionalExtractLastActiveElement(Vector<int> mask, Vector<int> defaultValue, Vector<int> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint32_t svclastb[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+ /// int32_t svclastb[_n_s32](svbool_t pg, int32_t fallback, svint32_t data)
+ ///
+ public static unsafe int ConditionalExtractLastActiveElement(Vector<int> mask, int defaultValues, Vector<int> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svclastb[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+ ///
+ public static unsafe Vector<long> ConditionalExtractLastActiveElement(Vector<long> mask, Vector<long> defaultValue, Vector<long> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svclastb[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+ /// int64_t svclastb[_n_s64](svbool_t pg, int64_t fallback, svint64_t data)
+ ///
+ public static unsafe long ConditionalExtractLastActiveElement(Vector<long> mask, long defaultValues, Vector<long> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint8_t svclastb[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+ ///
+ public static unsafe Vector<byte> ConditionalExtractLastActiveElement(Vector<byte> mask, Vector<byte> defaultValue, Vector<byte> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint8_t svclastb[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+ /// uint8_t svclastb[_n_u8](svbool_t pg, uint8_t fallback, svuint8_t data)
+ ///
+ public static unsafe byte ConditionalExtractLastActiveElement(Vector<byte> mask, byte defaultValues, Vector<byte> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint16_t svclastb[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+ ///
+ public static unsafe Vector<ushort> ConditionalExtractLastActiveElement(Vector<ushort> mask, Vector<ushort> defaultValue, Vector<ushort> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint16_t svclastb[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+ /// uint16_t svclastb[_n_u16](svbool_t pg, uint16_t fallback, svuint16_t data)
+ ///
+ public static unsafe ushort ConditionalExtractLastActiveElement(Vector<ushort> mask, ushort defaultValues, Vector<ushort> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint32_t svclastb[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+ ///
+ public static unsafe Vector<uint> ConditionalExtractLastActiveElement(Vector<uint> mask, Vector<uint> defaultValue, Vector<uint> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint32_t svclastb[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+ /// uint32_t svclastb[_n_u32](svbool_t pg, uint32_t fallback, svuint32_t data)
+ ///
+ public static unsafe uint ConditionalExtractLastActiveElement(Vector<uint> mask, uint defaultValues, Vector<uint> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svclastb[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+ ///
+ public static unsafe Vector<ulong> ConditionalExtractLastActiveElement(Vector<ulong> mask, Vector<ulong> defaultValue, Vector<ulong> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svclastb[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+ /// uint64_t svclastb[_n_u64](svbool_t pg, uint64_t fallback, svuint64_t data)
+ ///
+ public static unsafe ulong ConditionalExtractLastActiveElement(Vector<ulong> mask, ulong defaultValues, Vector<ulong> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat32_t svclastb[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+ ///
+ public static unsafe Vector<float> ConditionalExtractLastActiveElement(Vector<float> mask, Vector<float> defaultValue, Vector<float> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat32_t svclastb[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+ /// float32_t svclastb[_n_f32](svbool_t pg, float32_t fallback, svfloat32_t data)
+ ///
+ public static unsafe float ConditionalExtractLastActiveElement(Vector<float> mask, float defaultValues, Vector<float> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat64_t svclastb[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+ ///
+ public static unsafe Vector<double> ConditionalExtractLastActiveElement(Vector<double> mask, Vector<double> defaultValue, Vector<double> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat64_t svclastb[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+ /// float64_t svclastb[_n_f64](svbool_t pg, float64_t fallback, svfloat64_t data)
+ ///
+ public static unsafe double ConditionalExtractLastActiveElement(Vector<double> mask, double defaultValues, Vector<double> data) { throw new PlatformNotSupportedException(); }
+
+
+ /// ConditionalExtractLastActiveElementAndReplicate : Conditionally extract last element
+
+ ///
+ /// svint8_t svclastb[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+ ///
+ public static unsafe Vector<sbyte> ConditionalExtractLastActiveElementAndReplicate(Vector<sbyte> mask, Vector<sbyte> fallback, Vector<sbyte> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint16_t svclastb[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+ ///
+ public static unsafe Vector<short> ConditionalExtractLastActiveElementAndReplicate(Vector<short> mask, Vector<short> fallback, Vector<short> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint32_t svclastb[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+ ///
+ public static unsafe Vector<int> ConditionalExtractLastActiveElementAndReplicate(Vector<int> mask, Vector<int> fallback, Vector<int> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svclastb[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+ ///
+ public static unsafe Vector<long> ConditionalExtractLastActiveElementAndReplicate(Vector<long> mask, Vector<long> fallback, Vector<long> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint8_t svclastb[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+ ///
+ public static unsafe Vector<byte> ConditionalExtractLastActiveElementAndReplicate(Vector<byte> mask, Vector<byte> fallback, Vector<byte> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint16_t svclastb[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+ ///
+ public static unsafe Vector<ushort> ConditionalExtractLastActiveElementAndReplicate(Vector<ushort> mask, Vector<ushort> fallback, Vector<ushort> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint32_t svclastb[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+ ///
+ public static unsafe Vector<uint> ConditionalExtractLastActiveElementAndReplicate(Vector<uint> mask, Vector<uint> fallback, Vector<uint> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svclastb[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+ ///
+ public static unsafe Vector<ulong> ConditionalExtractLastActiveElementAndReplicate(Vector<ulong> mask, Vector<ulong> fallback, Vector<ulong> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat32_t svclastb[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+ ///
+ public static unsafe Vector<float> ConditionalExtractLastActiveElementAndReplicate(Vector<float> mask, Vector<float> fallback, Vector<float> data) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat64_t svclastb[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+ ///
+ public static unsafe Vector<double> ConditionalExtractLastActiveElementAndReplicate(Vector<double> mask, Vector<double> fallback, Vector<double> data) { throw new PlatformNotSupportedException(); }
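svclastb is the loop-tail counterpart of svclasta: it yields the value from the last lane that was still active, which is typically the result of the final partial iteration of a predicated loop. Sketch:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static class ClastbSketch
    {
        // After a predicated loop, recover the last value computed under 'mask';
        // 'fallback' survives when no lane was active.
        static float LastActiveOrDefault(Vector<float> mask, Vector<float> data, float fallback)
            => Sve.ConditionalExtractLastActiveElement(mask, fallback, data);
    }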
+
+
+ /// ConditionalSelect : Conditionally select elements
+
+ ///
+ /// svint8_t svsel[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<sbyte> ConditionalSelect(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint16_t svsel[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<short> ConditionalSelect(Vector<short> mask, Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint32_t svsel[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<int> ConditionalSelect(Vector<int> mask, Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svsel[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<long> ConditionalSelect(Vector<long> mask, Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint8_t svsel[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<byte> ConditionalSelect(Vector<byte> mask, Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint16_t svsel[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<ushort> ConditionalSelect(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint32_t svsel[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<uint> ConditionalSelect(Vector<uint> mask, Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svsel[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<ulong> ConditionalSelect(Vector<ulong> mask, Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat32_t svsel[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ ///
+ public static unsafe Vector<float> ConditionalSelect(Vector<float> mask, Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat64_t svsel[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ ///
+ public static unsafe Vector<double> ConditionalSelect(Vector<double> mask, Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
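ConditionalSelect is how this proposal expresses the ACLE `_m`/`_z` merging and zeroing forms: run the unpredicated operation, then select per lane. A sketch, assuming the Sve.Add stub defined elsewhere in this file:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static class SelectSketch
    {
        // Merging add (svadd_m): inactive lanes keep their 'a' value.
        static Vector<int> AddWhere(Vector<int> mask, Vector<int> a, Vector<int> b)
            => Sve.ConditionalSelect(mask, Sve.Add(a, b), a);
    }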
+
+
+ /// ConvertToDouble : Floating-point convert
+
+ ///
+ /// svfloat64_t svcvt_f64[_s32]_m(svfloat64_t inactive, svbool_t pg, svint32_t op)
+ /// svfloat64_t svcvt_f64[_s32]_x(svbool_t pg, svint32_t op)
+ /// svfloat64_t svcvt_f64[_s32]_z(svbool_t pg, svint32_t op)
+ ///
+ public static unsafe Vector<double> ConvertToDouble(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat64_t svcvt_f64[_s64]_m(svfloat64_t inactive, svbool_t pg, svint64_t op)
+ /// svfloat64_t svcvt_f64[_s64]_x(svbool_t pg, svint64_t op)
+ /// svfloat64_t svcvt_f64[_s64]_z(svbool_t pg, svint64_t op)
+ ///
+ public static unsafe Vector<double> ConvertToDouble(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat64_t svcvt_f64[_u32]_m(svfloat64_t inactive, svbool_t pg, svuint32_t op)
+ /// svfloat64_t svcvt_f64[_u32]_x(svbool_t pg, svuint32_t op)
+ /// svfloat64_t svcvt_f64[_u32]_z(svbool_t pg, svuint32_t op)
+ ///
+ public static unsafe Vector<double> ConvertToDouble(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat64_t svcvt_f64[_u64]_m(svfloat64_t inactive, svbool_t pg, svuint64_t op)
+ /// svfloat64_t svcvt_f64[_u64]_x(svbool_t pg, svuint64_t op)
+ /// svfloat64_t svcvt_f64[_u64]_z(svbool_t pg, svuint64_t op)
+ ///
+ public static unsafe Vector<double> ConvertToDouble(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat64_t svcvt_f64[_f32]_m(svfloat64_t inactive, svbool_t pg, svfloat32_t op)
+ /// svfloat64_t svcvt_f64[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// svfloat64_t svcvt_f64[_f32]_z(svbool_t pg, svfloat32_t op)
+ ///
+ public static unsafe Vector<double> ConvertToDouble(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+
+ /// ConvertToInt32 : Floating-point convert
+
+ ///
+ /// svint32_t svcvt_s32[_f32]_m(svint32_t inactive, svbool_t pg, svfloat32_t op)
+ /// svint32_t svcvt_s32[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// svint32_t svcvt_s32[_f32]_z(svbool_t pg, svfloat32_t op)
+ ///
+ public static unsafe Vector<int> ConvertToInt32(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint32_t svcvt_s32[_f64]_m(svint32_t inactive, svbool_t pg, svfloat64_t op)
+ /// svint32_t svcvt_s32[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// svint32_t svcvt_s32[_f64]_z(svbool_t pg, svfloat64_t op)
+ ///
+ public static unsafe Vector<int> ConvertToInt32(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+
+ /// ConvertToInt64 : Floating-point convert
+
+ ///
+ /// svint64_t svcvt_s64[_f32]_m(svint64_t inactive, svbool_t pg, svfloat32_t op)
+ /// svint64_t svcvt_s64[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// svint64_t svcvt_s64[_f32]_z(svbool_t pg, svfloat32_t op)
+ ///
+ public static unsafe Vector<long> ConvertToInt64(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svint64_t svcvt_s64[_f64]_m(svint64_t inactive, svbool_t pg, svfloat64_t op)
+ /// svint64_t svcvt_s64[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// svint64_t svcvt_s64[_f64]_z(svbool_t pg, svfloat64_t op)
+ ///
+ public static unsafe Vector<long> ConvertToInt64(Vector<double> value) { throw new PlatformNotSupportedException(); }
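The float-to-integer conversions map to FCVTZS/FCVTZU and so round toward zero, per lane. Sketch:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static class ConvertSketch
    {
        // Truncating per-lane conversion: 3.9 -> 3, -3.9 -> -3.
        static Vector<long> Truncate(Vector<double> x) => Sve.ConvertToInt64(x);
    }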
+
+
+ /// ConvertToSingle : Floating-point convert
+
+ ///
+ /// svfloat32_t svcvt_f32[_s32]_m(svfloat32_t inactive, svbool_t pg, svint32_t op)
+ /// svfloat32_t svcvt_f32[_s32]_x(svbool_t pg, svint32_t op)
+ /// svfloat32_t svcvt_f32[_s32]_z(svbool_t pg, svint32_t op)
+ ///
+ public static unsafe Vector<float> ConvertToSingle(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat32_t svcvt_f32[_s64]_m(svfloat32_t inactive, svbool_t pg, svint64_t op)
+ /// svfloat32_t svcvt_f32[_s64]_x(svbool_t pg, svint64_t op)
+ /// svfloat32_t svcvt_f32[_s64]_z(svbool_t pg, svint64_t op)
+ ///
+ public static unsafe Vector<float> ConvertToSingle(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat32_t svcvt_f32[_u32]_m(svfloat32_t inactive, svbool_t pg, svuint32_t op)
+ /// svfloat32_t svcvt_f32[_u32]_x(svbool_t pg, svuint32_t op)
+ /// svfloat32_t svcvt_f32[_u32]_z(svbool_t pg, svuint32_t op)
+ ///
+ public static unsafe Vector<float> ConvertToSingle(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat32_t svcvt_f32[_u64]_m(svfloat32_t inactive, svbool_t pg, svuint64_t op)
+ /// svfloat32_t svcvt_f32[_u64]_x(svbool_t pg, svuint64_t op)
+ /// svfloat32_t svcvt_f32[_u64]_z(svbool_t pg, svuint64_t op)
+ ///
+ public static unsafe Vector<float> ConvertToSingle(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svfloat32_t svcvt_f32[_f64]_m(svfloat32_t inactive, svbool_t pg, svfloat64_t op)
+ /// svfloat32_t svcvt_f32[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// svfloat32_t svcvt_f32[_f64]_z(svbool_t pg, svfloat64_t op)
+ ///
+ public static unsafe Vector<float> ConvertToSingle(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+
+ /// ConvertToUInt32 : Floating-point convert
+
+ ///
+ /// svuint32_t svcvt_u32[_f32]_m(svuint32_t inactive, svbool_t pg, svfloat32_t op)
+ /// svuint32_t svcvt_u32[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// svuint32_t svcvt_u32[_f32]_z(svbool_t pg, svfloat32_t op)
+ ///
+ public static unsafe Vector<uint> ConvertToUInt32(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint32_t svcvt_u32[_f64]_m(svuint32_t inactive, svbool_t pg, svfloat64_t op)
+ /// svuint32_t svcvt_u32[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// svuint32_t svcvt_u32[_f64]_z(svbool_t pg, svfloat64_t op)
+ ///
+ public static unsafe Vector<uint> ConvertToUInt32(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+
+ /// ConvertToUInt64 : Floating-point convert
+
+ ///
+ /// svuint64_t svcvt_u64[_f32]_m(svuint64_t inactive, svbool_t pg, svfloat32_t op)
+ /// svuint64_t svcvt_u64[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// svuint64_t svcvt_u64[_f32]_z(svbool_t pg, svfloat32_t op)
+ ///
+ public static unsafe Vector<ulong> ConvertToUInt64(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svuint64_t svcvt_u64[_f64]_m(svuint64_t inactive, svbool_t pg, svfloat64_t op)
+ /// svuint64_t svcvt_u64[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// svuint64_t svcvt_u64[_f64]_z(svbool_t pg, svfloat64_t op)
+ ///
+ public static unsafe Vector<ulong> ConvertToUInt64(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+
+ /// Count16BitElements : Count the number of 16-bit elements in a vector
+
+ ///
+ /// uint64_t svcnth_pat(enum svpattern pattern)
+ ///
+ public static unsafe ulong Count16BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+ /// Count32BitElements : Count the number of 32-bit elements in a vector
+
+ ///
+ /// uint64_t svcntw_pat(enum svpattern pattern)
+ ///
+ public static unsafe ulong Count32BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+ /// Count64BitElements : Count the number of 64-bit elements in a vector
+
+ ///
+ /// uint64_t svcntd_pat(enum svpattern pattern)
+ ///
+ public static unsafe ulong Count64BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+ /// Count8BitElements : Count the number of 8-bit elements in a vector
+
+ ///
+ /// uint64_t svcntb_pat(enum svpattern pattern)
+ ///
+ public static unsafe ulong Count8BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
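The element counts (svcntb/svcnth/svcntw/svcntd) report the hardware lane count and are the natural loop stride for vector-length-agnostic code. Sketch:

    using System;
    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static class CountSketch
    {
        static void ProcessInStrides(ReadOnlySpan<int> src)
        {
            int step = (int)Sve.Count32BitElements();   // lanes per vector under pattern All
            for (int i = 0; i < src.Length; i += step)
            {
                // ... handle src[i .. i + step) with a predicated tail ...
            }
        }
    }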
+
+
+ /// CreateBreakAfterMask : Break after first true condition
+
+ ///
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<sbyte> CreateBreakAfterMask(Vector<sbyte> totalMask, Vector<sbyte> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<short> CreateBreakAfterMask(Vector<short> totalMask, Vector<short> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<int> CreateBreakAfterMask(Vector<int> totalMask, Vector<int> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<long> CreateBreakAfterMask(Vector<long> totalMask, Vector<long> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<byte> CreateBreakAfterMask(Vector<byte> totalMask, Vector<byte> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<ushort> CreateBreakAfterMask(Vector<ushort> totalMask, Vector<ushort> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<uint> CreateBreakAfterMask(Vector<uint> totalMask, Vector<uint> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<ulong> CreateBreakAfterMask(Vector<ulong> totalMask, Vector<ulong> fromMask) { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateBreakAfterPropagateMask : Break after first true condition, propagating from previous partition
+
+ ///
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<sbyte> CreateBreakAfterPropagateMask(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<short> CreateBreakAfterPropagateMask(Vector<short> mask, Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<int> CreateBreakAfterPropagateMask(Vector<int> mask, Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<long> CreateBreakAfterPropagateMask(Vector<long> mask, Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<byte> CreateBreakAfterPropagateMask(Vector<byte> mask, Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<ushort> CreateBreakAfterPropagateMask(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<uint> CreateBreakAfterPropagateMask(Vector<uint> mask, Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<ulong> CreateBreakAfterPropagateMask(Vector<ulong> mask, Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
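BRKA keeps the lanes up to and including the first true lane of the trigger and clears everything after it, which is the core of speculative, strlen-style loops. Sketch:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static class BreakSketch
    {
        // Keep only the lanes up to (and including) the first hit.
        static Vector<byte> UpToFirstHit(Vector<byte> loopMask, Vector<byte> hits)
            => Sve.CreateBreakAfterMask(loopMask, hits);
    }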
+
+
+ /// CreateBreakBeforeMask : Break before first true condition
+
+ ///
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<sbyte> CreateBreakBeforeMask(Vector<sbyte> totalMask, Vector<sbyte> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<short> CreateBreakBeforeMask(Vector<short> totalMask, Vector<short> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<int> CreateBreakBeforeMask(Vector<int> totalMask, Vector<int> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<long> CreateBreakBeforeMask(Vector<long> totalMask, Vector<long> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<byte> CreateBreakBeforeMask(Vector<byte> totalMask, Vector<byte> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<ushort> CreateBreakBeforeMask(Vector<ushort> totalMask, Vector<ushort> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<uint> CreateBreakBeforeMask(Vector<uint> totalMask, Vector<uint> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<ulong> CreateBreakBeforeMask(Vector<ulong> totalMask, Vector<ulong> fromMask) { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateBreakBeforePropagateMask : Break before first true condition, propagating from previous partition
+
+ ///
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<sbyte> CreateBreakBeforePropagateMask(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<short> CreateBreakBeforePropagateMask(Vector<short> mask, Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<int> CreateBreakBeforePropagateMask(Vector<int> mask, Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<long> CreateBreakBeforePropagateMask(Vector<long> mask, Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<byte> CreateBreakBeforePropagateMask(Vector<byte> mask, Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<ushort> CreateBreakBeforePropagateMask(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<uint> CreateBreakBeforePropagateMask(Vector<uint> mask, Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<ulong> CreateBreakBeforePropagateMask(Vector<ulong> mask, Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateBreakPropagateMask : Propagate break to next partition
+
+ ///
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<sbyte> CreateBreakPropagateMask(Vector<sbyte> totalMask, Vector<sbyte> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<short> CreateBreakPropagateMask(Vector<short> totalMask, Vector<short> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<int> CreateBreakPropagateMask(Vector<int> totalMask, Vector<int> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<long> CreateBreakPropagateMask(Vector<long> totalMask, Vector<long> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<byte> CreateBreakPropagateMask(Vector<byte> totalMask, Vector<byte> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<ushort> CreateBreakPropagateMask(Vector<ushort> totalMask, Vector<ushort> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<uint> CreateBreakPropagateMask(Vector<uint> totalMask, Vector<uint> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ ///
+ public static unsafe Vector<ulong> CreateBreakPropagateMask(Vector<ulong> totalMask, Vector<ulong> fromMask) { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateFalseMaskByte : Set all predicate elements to false
+
+ ///
+ /// svbool_t svpfalse[_b]()
+ ///
+ public static unsafe Vector<byte> CreateFalseMaskByte() { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateFalseMaskDouble : Set all predicate elements to false
+
+ ///
+ /// svbool_t svpfalse[_b]()
+ ///
+ public static unsafe Vector<double> CreateFalseMaskDouble() { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateFalseMaskInt16 : Set all predicate elements to false
+
+ ///
+ /// svbool_t svpfalse[_b]()
+ ///
+ public static unsafe Vector<short> CreateFalseMaskInt16() { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateFalseMaskInt32 : Set all predicate elements to false
+
+ ///
+ /// svbool_t svpfalse[_b]()
+ ///
+ public static unsafe Vector<int> CreateFalseMaskInt32() { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateFalseMaskInt64 : Set all predicate elements to false
+
+ ///
+ /// svbool_t svpfalse[_b]()
+ ///
+ public static unsafe Vector<long> CreateFalseMaskInt64() { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateFalseMaskSByte : Set all predicate elements to false
+
+ ///
+ /// svbool_t svpfalse[_b]()
+ ///
+ public static unsafe Vector<sbyte> CreateFalseMaskSByte() { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateFalseMaskSingle : Set all predicate elements to false
+
+ ///
+ /// svbool_t svpfalse[_b]()
+ ///
+ public static unsafe Vector<float> CreateFalseMaskSingle() { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateFalseMaskUInt16 : Set all predicate elements to false
+
+ ///
+ /// svbool_t svpfalse[_b]()
+ ///
+ public static unsafe Vector<ushort> CreateFalseMaskUInt16() { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateFalseMaskUInt32 : Set all predicate elements to false
+
+ ///
+ /// svbool_t svpfalse[_b]()
+ ///
+ public static unsafe Vector<uint> CreateFalseMaskUInt32() { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateFalseMaskUInt64 : Set all predicate elements to false
+
+ ///
+ /// svbool_t svpfalse[_b]()
+ ///
+ public static unsafe Vector<ulong> CreateFalseMaskUInt64() { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateMaskForFirstActiveElement : Set the first active predicate element to true
+
+ ///
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<sbyte> CreateMaskForFirstActiveElement(Vector<sbyte> totalMask, Vector<sbyte> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<short> CreateMaskForFirstActiveElement(Vector<short> totalMask, Vector<short> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<int> CreateMaskForFirstActiveElement(Vector<int> totalMask, Vector<int> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<long> CreateMaskForFirstActiveElement(Vector<long> totalMask, Vector<long> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<byte> CreateMaskForFirstActiveElement(Vector<byte> totalMask, Vector<byte> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<ushort> CreateMaskForFirstActiveElement(Vector<ushort> totalMask, Vector<ushort> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<uint> CreateMaskForFirstActiveElement(Vector<uint> totalMask, Vector<uint> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<ulong> CreateMaskForFirstActiveElement(Vector<ulong> totalMask, Vector<ulong> fromMask) { throw new PlatformNotSupportedException(); }
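svpfirst and svpnext support walking the active lanes of a predicate one at a time: seed a cursor with pfirst over a false mask, then advance with pnext until it empties. A sketch; the TestAnyTrue guard is an assumption here, standing in for whatever "mask not empty" test the final API provides:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static class LaneWalkSketch
    {
        static void VisitActiveLanes(Vector<uint> totalMask)
        {
            Vector<uint> cursor =
                Sve.CreateMaskForFirstActiveElement(totalMask, Sve.CreateFalseMaskUInt32());
            while (Sve.TestAnyTrue(totalMask, cursor))          // assumed helper; see lead-in
            {
                // ... operate on the single active lane selected by 'cursor' ...
                cursor = Sve.CreateMaskForNextActiveElement(totalMask, cursor);
            }
        }
    }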
+
+
+ /// CreateMaskForNextActiveElement : Find next active predicate
+
+ ///
+ /// svbool_t svpnext_b8(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<byte> CreateMaskForNextActiveElement(Vector<byte> totalMask, Vector<byte> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svpnext_b16(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<ushort> CreateMaskForNextActiveElement(Vector<ushort> totalMask, Vector<ushort> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svpnext_b32(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<uint> CreateMaskForNextActiveElement(Vector<uint> totalMask, Vector<uint> fromMask) { throw new PlatformNotSupportedException(); }
+
+ ///
+ /// svbool_t svpnext_b64(svbool_t pg, svbool_t op)
+ ///
+ public static unsafe Vector<ulong> CreateMaskForNextActiveElement(Vector<ulong> totalMask, Vector<ulong> fromMask) { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateTrueMaskByte : Set predicate elements to true
+
+ ///
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+ ///
+ public static unsafe Vector<byte> CreateTrueMaskByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateTrueMaskDouble : Set predicate elements to true
+
+ ///
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+ ///
+ public static unsafe Vector<double> CreateTrueMaskDouble([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateTrueMaskInt16 : Set predicate elements to true
+
+ ///
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+ ///
+ public static unsafe Vector<short> CreateTrueMaskInt16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateTrueMaskInt32 : Set predicate elements to true
+
+ ///
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+ ///
+ public static unsafe Vector<int> CreateTrueMaskInt32([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateTrueMaskInt64 : Set predicate elements to true
+
+ ///
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+ ///
+ public static unsafe Vector<long> CreateTrueMaskInt64([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateTrueMaskSByte : Set predicate elements to true
+
+ ///
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+ ///
+ public static unsafe Vector<sbyte> CreateTrueMaskSByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+ /// CreateTrueMaskSingle : Set predicate elements to true
+
+ ///
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+ ///
+ public static unsafe Vector<float> CreateTrueMaskSingle([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
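The pattern argument restricts how many lanes come up true (every lane, the largest power of two, a fixed VL count, and so on), while the while-less-than masks defined just below build the classic bounded-loop predicate instead. A sketch, with the SveMaskPattern member name taken from this proposal's enum:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static class TrueMaskSketch
    {
        // Largest power-of-two lane count rather than every lane.
        static Vector<uint> Pow2Lanes() => Sve.CreateTrueMaskUInt32(SveMaskPattern.LargestPowerOf2);

        // Bounded-loop predicate: lanes [i, n) active (see CreateWhileLessThanMask32Bit below).
        static Vector<uint> LoopMask(int i, int n) => Sve.CreateWhileLessThanMask32Bit(i, n);
    }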
+
+
+        /// CreateTrueMaskByte : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<byte> CreateTrueMaskByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateTrueMaskDouble : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<double> CreateTrueMaskDouble([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateTrueMaskInt16 : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<short> CreateTrueMaskInt16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateTrueMaskInt32 : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<int> CreateTrueMaskInt32([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateTrueMaskInt64 : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<long> CreateTrueMaskInt64([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateTrueMaskSByte : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<sbyte> CreateTrueMaskSByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateTrueMaskSingle : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<float> CreateTrueMaskSingle([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateTrueMaskUInt16 : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b16(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateTrueMaskUInt16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateTrueMaskUInt32 : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b32(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<uint> CreateTrueMaskUInt32([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateTrueMaskUInt64 : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b64(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateTrueMaskUInt64([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
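+
+        // --- Illustrative sketch (editorial addition, not part of the generated API) ---
+        // The pattern operand restricts how many leading lanes the true mask enables.
+        // SveMaskPattern.VectorCount4 is assumed from the published enum: it enables exactly
+        // four lanes when the hardware vector is long enough, and no lanes otherwise.
+        private static Vector<uint> FirstFourLanesMask()
+        {
+            return Sve.CreateTrueMaskUInt32(SveMaskPattern.VectorCount4);
+        }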
+
+
+        /// CreateWhileLessThanMask16Bit : While incrementing scalar is less than
+
+        /// <summary>
+        /// svbool_t svwhilelt_b16[_s32](int32_t op1, int32_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateWhileLessThanMask16Bit(int left, int right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilelt_b16[_s64](int64_t op1, int64_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateWhileLessThanMask16Bit(long left, long right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilelt_b16[_u32](uint32_t op1, uint32_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateWhileLessThanMask16Bit(uint left, uint right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilelt_b16[_u64](uint64_t op1, uint64_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateWhileLessThanMask16Bit(ulong left, ulong right) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateWhileLessThanMask32Bit : While incrementing scalar is less than
+
+        /// <summary>
+        /// svbool_t svwhilelt_b32[_s32](int32_t op1, int32_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateWhileLessThanMask32Bit(int left, int right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilelt_b32[_s64](int64_t op1, int64_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateWhileLessThanMask32Bit(long left, long right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilelt_b32[_u32](uint32_t op1, uint32_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateWhileLessThanMask32Bit(uint left, uint right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilelt_b32[_u64](uint64_t op1, uint64_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateWhileLessThanMask32Bit(ulong left, ulong right) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateWhileLessThanMask64Bit : While incrementing scalar is less than
+
+        /// <summary>
+        /// svbool_t svwhilelt_b64[_s32](int32_t op1, int32_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateWhileLessThanMask64Bit(int left, int right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilelt_b64[_s64](int64_t op1, int64_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateWhileLessThanMask64Bit(long left, long right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilelt_b64[_u32](uint32_t op1, uint32_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateWhileLessThanMask64Bit(uint left, uint right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilelt_b64[_u64](uint64_t op1, uint64_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateWhileLessThanMask64Bit(ulong left, ulong right) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateWhileLessThanMask8Bit : While incrementing scalar is less than
+
+        /// <summary>
+        /// svbool_t svwhilelt_b8[_s32](int32_t op1, int32_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CreateWhileLessThanMask8Bit(int left, int right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilelt_b8[_s64](int64_t op1, int64_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CreateWhileLessThanMask8Bit(long left, long right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilelt_b8[_u32](uint32_t op1, uint32_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CreateWhileLessThanMask8Bit(uint left, uint right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilelt_b8[_u64](uint64_t op1, uint64_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CreateWhileLessThanMask8Bit(ulong left, ulong right) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateWhileLessThanOrEqualMask16Bit : While incrementing scalar is less than or equal to
+
+        /// <summary>
+        /// svbool_t svwhilele_b16[_s32](int32_t op1, int32_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateWhileLessThanOrEqualMask16Bit(int left, int right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilele_b16[_s64](int64_t op1, int64_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateWhileLessThanOrEqualMask16Bit(long left, long right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilele_b16[_u32](uint32_t op1, uint32_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateWhileLessThanOrEqualMask16Bit(uint left, uint right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilele_b16[_u64](uint64_t op1, uint64_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateWhileLessThanOrEqualMask16Bit(ulong left, ulong right) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateWhileLessThanOrEqualMask32Bit : While incrementing scalar is less than or equal to
+
+        /// <summary>
+        /// svbool_t svwhilele_b32[_s32](int32_t op1, int32_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateWhileLessThanOrEqualMask32Bit(int left, int right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilele_b32[_s64](int64_t op1, int64_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateWhileLessThanOrEqualMask32Bit(long left, long right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilele_b32[_u32](uint32_t op1, uint32_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateWhileLessThanOrEqualMask32Bit(uint left, uint right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilele_b32[_u64](uint64_t op1, uint64_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateWhileLessThanOrEqualMask32Bit(ulong left, ulong right) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateWhileLessThanOrEqualMask64Bit : While incrementing scalar is less than or equal to
+
+        /// <summary>
+        /// svbool_t svwhilele_b64[_s32](int32_t op1, int32_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateWhileLessThanOrEqualMask64Bit(int left, int right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilele_b64[_s64](int64_t op1, int64_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateWhileLessThanOrEqualMask64Bit(long left, long right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilele_b64[_u32](uint32_t op1, uint32_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateWhileLessThanOrEqualMask64Bit(uint left, uint right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilele_b64[_u64](uint64_t op1, uint64_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateWhileLessThanOrEqualMask64Bit(ulong left, ulong right) { throw new PlatformNotSupportedException(); }
+
+
+        /// CreateWhileLessThanOrEqualMask8Bit : While incrementing scalar is less than or equal to
+
+        /// <summary>
+        /// svbool_t svwhilele_b8[_s32](int32_t op1, int32_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CreateWhileLessThanOrEqualMask8Bit(int left, int right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilele_b8[_s64](int64_t op1, int64_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CreateWhileLessThanOrEqualMask8Bit(long left, long right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilele_b8[_u32](uint32_t op1, uint32_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CreateWhileLessThanOrEqualMask8Bit(uint left, uint right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svwhilele_b8[_u64](uint64_t op1, uint64_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CreateWhileLessThanOrEqualMask8Bit(ulong left, ulong right) { throw new PlatformNotSupportedException(); }
+
+
+        /// Divide : Divide
+
+        /// <summary>
+        /// svint32_t svdiv[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+        /// svint32_t svdiv[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+        /// svint32_t svdiv[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+        /// </summary>
+        public static unsafe Vector<int> Divide(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svdiv[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+        /// svint64_t svdiv[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+        /// svint64_t svdiv[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<long> Divide(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svdiv[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+        /// svuint32_t svdiv[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+        /// svuint32_t svdiv[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> Divide(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint64_t svdiv[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+        /// svuint64_t svdiv[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+        /// svuint64_t svdiv[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> Divide(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svfloat32_t svdiv[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        /// svfloat32_t svdiv[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        /// svfloat32_t svdiv[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        /// </summary>
+        public static unsafe Vector<float> Divide(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svfloat64_t svdiv[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        /// svfloat64_t svdiv[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        /// svfloat64_t svdiv[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        /// </summary>
+        public static unsafe Vector<double> Divide(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
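+
+        // --- Illustrative sketch (editorial addition, not part of the generated API) ---
+        // The canonical SVE strip-mined loop: CreateWhileLessThanMask32Bit rebuilds the
+        // governing predicate each iteration, so the final partial vector needs no scalar
+        // tail loop. Sve.LoadVector and Sve.Store are assumed from the wider SVE surface
+        // and are not part of this excerpt.
+        private static unsafe void DivideArrays(uint* left, uint* right, uint* destination, int length)
+        {
+            for (int i = 0; i < length; i += Vector<uint>.Count)
+            {
+                Vector<uint> mask = Sve.CreateWhileLessThanMask32Bit(i, length);
+                Vector<uint> quotients = Sve.Divide(Sve.LoadVector(mask, left + i),
+                                                    Sve.LoadVector(mask, right + i));
+                Sve.Store(mask, destination + i, quotients);
+            }
+        }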
+
+
+        /// DotProduct : Dot product
+
+        /// <summary>
+        /// svint32_t svdot[_s32](svint32_t op1, svint8_t op2, svint8_t op3)
+        /// </summary>
+        public static unsafe Vector<int> DotProduct(Vector<int> addend, Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svdot[_s64](svint64_t op1, svint16_t op2, svint16_t op3)
+        /// </summary>
+        public static unsafe Vector<long> DotProduct(Vector<long> addend, Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svdot[_u32](svuint32_t op1, svuint8_t op2, svuint8_t op3)
+        /// </summary>
+        public static unsafe Vector<uint> DotProduct(Vector<uint> addend, Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint64_t svdot[_u64](svuint64_t op1, svuint16_t op2, svuint16_t op3)
+        /// </summary>
+        public static unsafe Vector<ulong> DotProduct(Vector<ulong> addend, Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+
+        /// DotProductBySelectedScalar : Dot product
+
+        /// <summary>
+        /// svint32_t svdot_lane[_s32](svint32_t op1, svint8_t op2, svint8_t op3, uint64_t imm_index)
+        /// </summary>
+        public static unsafe Vector<int> DotProductBySelectedScalar(Vector<int> addend, Vector<sbyte> left, Vector<sbyte> right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svdot_lane[_s64](svint64_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+        /// </summary>
+        public static unsafe Vector<long> DotProductBySelectedScalar(Vector<long> addend, Vector<short> left, Vector<short> right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svdot_lane[_u32](svuint32_t op1, svuint8_t op2, svuint8_t op3, uint64_t imm_index)
+        /// </summary>
+        public static unsafe Vector<uint> DotProductBySelectedScalar(Vector<uint> addend, Vector<byte> left, Vector<byte> right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint64_t svdot_lane[_u64](svuint64_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index)
+        /// </summary>
+        public static unsafe Vector<ulong> DotProductBySelectedScalar(Vector<ulong> addend, Vector<ushort> left, Vector<ushort> right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); }
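+
+        // --- Illustrative sketch (editorial addition, not part of the generated API) ---
+        // svdot is a widening reduction: each 32-bit result lane accumulates the sum of four
+        // adjacent 8-bit products, so one call folds a full Vector<sbyte> pair into the
+        // Vector<int> accumulator.
+        private static Vector<int> DotAccumulate(Vector<int> accumulator, Vector<sbyte> a, Vector<sbyte> b)
+        {
+            return Sve.DotProduct(accumulator, a, b);
+        }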
+
+
+        /// DuplicateSelectedScalarToVector : Broadcast a scalar value
+
+        /// <summary>
+        /// svint8_t svdup_lane[_s8](svint8_t data, uint8_t index)
+        /// svint8_t svdupq_lane[_s8](svint8_t data, uint64_t index)
+        /// </summary>
+        public static unsafe Vector<sbyte> DuplicateSelectedScalarToVector(Vector<sbyte> data, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint16_t svdup_lane[_s16](svint16_t data, uint16_t index)
+        /// svint16_t svdupq_lane[_s16](svint16_t data, uint64_t index)
+        /// </summary>
+        public static unsafe Vector<short> DuplicateSelectedScalarToVector(Vector<short> data, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svdup_lane[_s32](svint32_t data, uint32_t index)
+        /// svint32_t svdupq_lane[_s32](svint32_t data, uint64_t index)
+        /// </summary>
+        public static unsafe Vector<int> DuplicateSelectedScalarToVector(Vector<int> data, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svdup_lane[_s64](svint64_t data, uint64_t index)
+        /// svint64_t svdupq_lane[_s64](svint64_t data, uint64_t index)
+        /// </summary>
+        public static unsafe Vector<long> DuplicateSelectedScalarToVector(Vector<long> data, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint8_t svdup_lane[_u8](svuint8_t data, uint8_t index)
+        /// svuint8_t svdupq_lane[_u8](svuint8_t data, uint64_t index)
+        /// </summary>
+        public static unsafe Vector<byte> DuplicateSelectedScalarToVector(Vector<byte> data, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint16_t svdup_lane[_u16](svuint16_t data, uint16_t index)
+        /// svuint16_t svdupq_lane[_u16](svuint16_t data, uint64_t index)
+        /// </summary>
+        public static unsafe Vector<ushort> DuplicateSelectedScalarToVector(Vector<ushort> data, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svdup_lane[_u32](svuint32_t data, uint32_t index)
+        /// svuint32_t svdupq_lane[_u32](svuint32_t data, uint64_t index)
+        /// </summary>
+        public static unsafe Vector<uint> DuplicateSelectedScalarToVector(Vector<uint> data, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint64_t svdup_lane[_u64](svuint64_t data, uint64_t index)
+        /// svuint64_t svdupq_lane[_u64](svuint64_t data, uint64_t index)
+        /// </summary>
+        public static unsafe Vector<ulong> DuplicateSelectedScalarToVector(Vector<ulong> data, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svfloat32_t svdup_lane[_f32](svfloat32_t data, uint32_t index)
+        /// svfloat32_t svdupq_lane[_f32](svfloat32_t data, uint64_t index)
+        /// </summary>
+        public static unsafe Vector<float> DuplicateSelectedScalarToVector(Vector<float> data, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svfloat64_t svdup_lane[_f64](svfloat64_t data, uint64_t index)
+        /// svfloat64_t svdupq_lane[_f64](svfloat64_t data, uint64_t index)
+        /// </summary>
+        public static unsafe Vector<double> DuplicateSelectedScalarToVector(Vector<double> data, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); }
+
+
+        /// ExtractAfterLastScalar : Extract element after last
+
+        /// <summary>
+        /// int8_t svlasta[_s8](svbool_t pg, svint8_t op)
+        /// </summary>
+        public static unsafe sbyte ExtractAfterLastScalar(Vector<sbyte> value) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// int16_t svlasta[_s16](svbool_t pg, svint16_t op)
+        /// </summary>
+        public static unsafe short ExtractAfterLastScalar(Vector<short> value) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// int32_t svlasta[_s32](svbool_t pg, svint32_t op)
+        /// </summary>
+        public static unsafe int ExtractAfterLastScalar(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// int64_t svlasta[_s64](svbool_t pg, svint64_t op)
+        /// </summary>
+        public static unsafe long ExtractAfterLastScalar(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// uint8_t svlasta[_u8](svbool_t pg, svuint8_t op)
+        /// </summary>
+        public static unsafe byte ExtractAfterLastScalar(Vector<byte> value) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// uint16_t svlasta[_u16](svbool_t pg, svuint16_t op)
+        /// </summary>
+        public static unsafe ushort ExtractAfterLastScalar(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// uint32_t svlasta[_u32](svbool_t pg, svuint32_t op)
+        /// </summary>
+        public static unsafe uint ExtractAfterLastScalar(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// uint64_t svlasta[_u64](svbool_t pg, svuint64_t op)
+        /// </summary>
+        public static unsafe ulong ExtractAfterLastScalar(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// float32_t svlasta[_f32](svbool_t pg, svfloat32_t op)
+        /// </summary>
+        public static unsafe float ExtractAfterLastScalar(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// float64_t svlasta[_f64](svbool_t pg, svfloat64_t op)
+        /// </summary>
+        public static unsafe double ExtractAfterLastScalar(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+
+        /// ExtractAfterLastVector : Extract element after last
+
+        /// <summary>
+        /// int8_t svlasta[_s8](svbool_t pg, svint8_t op)
+        /// </summary>
+        public static unsafe Vector
ExtractAfterLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int16_t svlasta[_s16](svbool_t pg, svint16_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int32_t svlasta[_s32](svbool_t pg, svint32_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int64_t svlasta[_s64](svbool_t pg, svint64_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint8_t svlasta[_u8](svbool_t pg, svuint8_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint16_t svlasta[_u16](svbool_t pg, svuint16_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint32_t svlasta[_u32](svbool_t pg, svuint32_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint64_t svlasta[_u64](svbool_t pg, svuint64_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// float32_t svlasta[_f32](svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// float64_t svlasta[_f64](svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + + /// ExtractLastScalar : Extract last element + + /// + /// int8_t svlastb[_s8](svbool_t pg, svint8_t op) + /// + public static unsafe sbyte ExtractLastScalar(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int16_t svlastb[_s16](svbool_t pg, svint16_t op) + /// + public static unsafe short ExtractLastScalar(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int32_t svlastb[_s32](svbool_t pg, svint32_t op) + /// + public static unsafe int ExtractLastScalar(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int64_t svlastb[_s64](svbool_t pg, svint64_t op) + /// + public static unsafe long ExtractLastScalar(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint8_t svlastb[_u8](svbool_t pg, svuint8_t op) + /// + public static unsafe byte ExtractLastScalar(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint16_t svlastb[_u16](svbool_t pg, svuint16_t op) + /// + public static unsafe ushort ExtractLastScalar(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint32_t svlastb[_u32](svbool_t pg, svuint32_t op) + /// + public static unsafe uint ExtractLastScalar(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint64_t svlastb[_u64](svbool_t pg, svuint64_t op) + /// + public static unsafe ulong ExtractLastScalar(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// float32_t svlastb[_f32](svbool_t pg, svfloat32_t op) + /// + public static unsafe float ExtractLastScalar(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// float64_t svlastb[_f64](svbool_t pg, svfloat64_t op) + /// + public static unsafe double ExtractLastScalar(Vector value) { throw new 
PlatformNotSupportedException(); } + + + /// ExtractLastVector : Extract last element + + /// + /// int8_t svlastb[_s8](svbool_t pg, svint8_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int16_t svlastb[_s16](svbool_t pg, svint16_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int32_t svlastb[_s32](svbool_t pg, svint32_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// int64_t svlastb[_s64](svbool_t pg, svint64_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint8_t svlastb[_u8](svbool_t pg, svuint8_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint16_t svlastb[_u16](svbool_t pg, svuint16_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint32_t svlastb[_u32](svbool_t pg, svuint32_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// uint64_t svlastb[_u64](svbool_t pg, svuint64_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// float32_t svlastb[_f32](svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// float64_t svlastb[_f64](svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + + /// ExtractVector : Extract vector from pair of vectors + + /// + /// svint8_t svext[_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svext[_s16](svint16_t op1, svint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svext[_s32](svint32_t op1, svint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svext[_s64](svint64_t op1, svint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svext[_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svext[_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svext[_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) { 
throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svext[_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svext[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svext[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); } + + + /// FloatingPointExponentialAccelerator : Floating-point exponential accelerator + + /// + /// svfloat32_t svexpa[_f32](svuint32_t op) + /// + public static unsafe Vector FloatingPointExponentialAccelerator(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svexpa[_f64](svuint64_t op) + /// + public static unsafe Vector FloatingPointExponentialAccelerator(Vector value) { throw new PlatformNotSupportedException(); } + + + /// FusedMultiplyAdd : Multiply-add, addend first + + /// + /// svfloat32_t svmla[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svmla[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svmla[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t svmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t svmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// FusedMultiplyAddBySelectedScalar : Multiply-add, addend first + + /// + /// svfloat32_t svmla_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index) + /// + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svmla_lane[_f64](svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_index) + /// + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + + /// FusedMultiplyAddNegated : Negated multiply-add, addend first + + /// + /// svfloat32_t svnmla[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svnmla[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svnmla[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svnmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t svnmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t 
op2, svfloat64_t op3) + /// svfloat64_t svnmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// FusedMultiplySubtract : Multiply-subtract, minuend first + + /// + /// svfloat32_t svmls[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svmls[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svmls[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svmls[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t svmls[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t svmls[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// FusedMultiplySubtractBySelectedScalar : Multiply-subtract, minuend first + + /// + /// svfloat32_t svmls_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index) + /// + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svmls_lane[_f64](svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_index) + /// + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + + /// FusedMultiplySubtractNegated : Negated multiply-subtract, minuend first + + /// + /// svfloat32_t svnmls[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svnmls[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svnmls[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svnmls[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t svnmls[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t svnmls[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// GatherPrefetch16Bit : Prefetch halfwords + + /// + /// void svprfh_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfh_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new 
PlatformNotSupportedException(); } + + /// + /// void svprfh_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfh_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfh_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfh_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfh_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfh_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfh_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfh_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfh_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfh_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + + /// GatherPrefetch32Bit : Prefetch words + + /// + /// void svprfw_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfw_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType 
prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfw_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfw_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfw_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfw_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfw_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfw_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfw_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfw_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfw_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfw_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + + /// GatherPrefetch64Bit : Prefetch doublewords + + /// + /// void svprfd_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfd_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, 
[ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfd_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfd_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfd_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfd_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfd_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfd_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfd_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfd_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfd_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfd_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + + /// GatherPrefetch8Bit : Prefetch bytes + + /// + /// void svprfb_gather_[s32]offset(svbool_t pg, const void *base, svint32_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfb_gather_[s64]offset(svbool_t pg, const void *base, svint64_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* 
address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfb_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfb_gather_[u32]offset(svbool_t pg, const void *base, svuint32_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfb_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfb_gather_[u64]offset(svbool_t pg, const void *base, svuint64_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfb_gather_[s32]offset(svbool_t pg, const void *base, svint32_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfb_gather_[s64]offset(svbool_t pg, const void *base, svint64_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfb_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfb_gather_[u32]offset(svbool_t pg, const void *base, svuint32_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfb_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + /// + /// void svprfb_gather_[u64]offset(svbool_t pg, const void *base, svuint64_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); } + + + /// GatherVector : Unextended load + + /// + /// svint32_t svld1_gather_[s32]index[_s32](svbool_t pg, const int32_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVector(Vector mask, int* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svld1_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVector(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// 
svint32_t svld1_gather_[u32]index[_s32](svbool_t pg, const int32_t *base, svuint32_t indices)
+        /// </summary>
+        public static unsafe Vector<int> GatherVector(Vector<int> mask, int* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svld1_gather_[s64]index[_s64](svbool_t pg, const int64_t *base, svint64_t indices)
+        /// </summary>
+        public static unsafe Vector<long> GatherVector(Vector<long> mask, long* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svld1_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+        /// </summary>
+        public static unsafe Vector<long> GatherVector(Vector<long> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svld1_gather_[u64]index[_s64](svbool_t pg, const int64_t *base, svuint64_t indices)
+        /// </summary>
+        public static unsafe Vector<long> GatherVector(Vector<long> mask, long* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svld1_gather_[s32]index[_u32](svbool_t pg, const uint32_t *base, svint32_t indices)
+        /// </summary>
+        public static unsafe Vector<uint> GatherVector(Vector<uint> mask, uint* address, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svld1_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+        /// </summary>
+        public static unsafe Vector<uint> GatherVector(Vector<uint> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svld1_gather_[u32]index[_u32](svbool_t pg, const uint32_t *base, svuint32_t indices)
+        /// </summary>
+        public static unsafe Vector<uint> GatherVector(Vector<uint> mask, uint* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint64_t svld1_gather_[s64]index[_u64](svbool_t pg, const uint64_t *base, svint64_t indices)
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVector(Vector<ulong> mask, ulong* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint64_t svld1_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVector(Vector<ulong> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint64_t svld1_gather_[u64]index[_u64](svbool_t pg, const uint64_t *base, svuint64_t indices)
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVector(Vector<ulong> mask, ulong* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svfloat32_t svld1_gather_[s32]index[_f32](svbool_t pg, const float32_t *base, svint32_t indices)
+        /// </summary>
+        public static unsafe Vector<float> GatherVector(Vector<float> mask, float* address, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svfloat32_t svld1_gather[_u32base]_f32(svbool_t pg, svuint32_t bases)
+        /// </summary>
+        public static unsafe Vector<float> GatherVector(Vector<float> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svfloat32_t svld1_gather_[u32]index[_f32](svbool_t pg, const float32_t *base, svuint32_t indices)
+        /// </summary>
+        public static unsafe Vector<float> GatherVector(Vector<float> mask, float* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svfloat64_t svld1_gather_[s64]index[_f64](svbool_t pg, const float64_t *base, svint64_t indices)
+        /// </summary>
+        public static unsafe Vector<double> GatherVector(Vector<double> mask, double* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svfloat64_t svld1_gather[_u64base]_f64(svbool_t pg, svuint64_t bases)
+        /// </summary>
+        public static unsafe Vector<double> GatherVector(Vector<double> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svfloat64_t svld1_gather_[u64]index[_f64](svbool_t pg, const float64_t *base, svuint64_t indices)
+        /// </summary>
+        public static unsafe Vector<double> GatherVector(Vector<double> mask, double* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
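+
+        // --- Illustrative sketch (editorial addition, not part of the generated API) ---
+        // svld1_gather with vector indices: for each active lane i of 'mask', the result
+        // lane loads table[indices[i]] (the index form scales by the element size, unlike
+        // the byte-offset forms); lanes with an inactive predicate element are zeroed.
+        private static unsafe Vector<uint> GatherTableLookup(Vector<uint> mask, uint* table, Vector<uint> indices)
+        {
+            return Sve.GatherVector(mask, table, indices);
+        }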
+
+
+        /// GatherVectorByteZeroExtend : Load 8-bit data and zero-extend
+
+        /// <summary>
+        /// svint32_t svld1ub_gather_[s32]offset_s32(svbool_t pg, const uint8_t *base, svint32_t offsets)
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorByteZeroExtend(Vector<int> mask, byte* address, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svld1ub_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorByteZeroExtend(Vector<int> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svld1ub_gather_[u32]offset_s32(svbool_t pg, const uint8_t *base, svuint32_t offsets)
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorByteZeroExtend(Vector<int> mask, byte* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svld1ub_gather_[s64]offset_s64(svbool_t pg, const uint8_t *base, svint64_t offsets)
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorByteZeroExtend(Vector<long> mask, byte* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svld1ub_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorByteZeroExtend(Vector<long> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svld1ub_gather_[u64]offset_s64(svbool_t pg, const uint8_t *base, svuint64_t offsets)
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorByteZeroExtend(Vector<long> mask, byte* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svld1ub_gather_[s32]offset_u32(svbool_t pg, const uint8_t *base, svint32_t offsets)
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorByteZeroExtend(Vector<uint> mask, byte* address, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svld1ub_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorByteZeroExtend(Vector<uint> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svld1ub_gather_[u32]offset_u32(svbool_t pg, const uint8_t *base, svuint32_t offsets)
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorByteZeroExtend(Vector<uint> mask, byte* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint64_t svld1ub_gather_[s64]offset_u64(svbool_t pg, const uint8_t *base, svint64_t offsets)
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorByteZeroExtend(Vector<ulong> mask, byte* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint64_t svld1ub_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorByteZeroExtend(Vector<ulong> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint64_t svld1ub_gather_[u64]offset_u64(svbool_t pg, const uint8_t *base, svuint64_t offsets)
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorByteZeroExtend(Vector<ulong> mask, byte* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+
+        /// GatherVectorByteZeroExtendFirstFaulting : Load 8-bit data and zero-extend, first-faulting
+
+        /// <summary>
+        /// svint32_t svldff1ub_gather_[s32]offset_s32(svbool_t pg, const uint8_t *base, svint32_t offsets)
+        /// </summary>
+        public static
unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svldff1ub_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svldff1ub_gather_[u32]offset_s32(svbool_t pg, const uint8_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldff1ub_gather_[s64]offset_s64(svbool_t pg, const uint8_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldff1ub_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldff1ub_gather_[u64]offset_s64(svbool_t pg, const uint8_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldff1ub_gather_[s32]offset_u32(svbool_t pg, const uint8_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldff1ub_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldff1ub_gather_[u32]offset_u32(svbool_t pg, const uint8_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldff1ub_gather_[s64]offset_u64(svbool_t pg, const uint8_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldff1ub_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldff1ub_gather_[u64]offset_u64(svbool_t pg, const uint8_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + + /// GatherVectorFirstFaulting : Unextended load, first-faulting + + /// + /// svint32_t svldff1_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svldff1_gather_[s32]index[_s32](svbool_t pg, const int32_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, int* address, Vector 
indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svldff1_gather_[u32]index[_s32](svbool_t pg, const int32_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, int* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldff1_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldff1_gather_[s64]index[_s64](svbool_t pg, const int64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, long* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldff1_gather_[u64]index[_s64](svbool_t pg, const int64_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, long* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldff1_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldff1_gather_[s32]index[_u32](svbool_t pg, const uint32_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldff1_gather_[u32]index[_u32](svbool_t pg, const uint32_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldff1_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldff1_gather_[s64]index[_u64](svbool_t pg, const uint64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, ulong* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldff1_gather_[u64]index[_u64](svbool_t pg, const uint64_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, ulong* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svldff1_gather_[s32]index[_f32](svbool_t pg, const float32_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, float* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svldff1_gather[_u32base]_f32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svldff1_gather_[u32]index[_f32](svbool_t pg, const float32_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, float* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svldff1_gather_[s64]index[_f64](svbool_t pg, const float64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, double* address, Vector 
+
+    /// <summary>
+    /// svfloat64_t svldff1_gather[_u64base]_f64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<double> GatherVectorFirstFaulting(Vector<double> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svldff1_gather_[u64]index[_f64](svbool_t pg, const float64_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<double> GatherVectorFirstFaulting(Vector<double> mask, double* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+
+    /// GatherVectorInt16SignExtend : Load 16-bit data and sign-extend
+
+    /// <summary>
+    /// svint32_t svld1sh_gather_[s32]index_s32(svbool_t pg, const int16_t *base, svint32_t indices)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorInt16SignExtend(Vector<int> mask, short* address, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svld1sh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorInt16SignExtend(Vector<int> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svld1sh_gather_[u32]index_s32(svbool_t pg, const int16_t *base, svuint32_t indices)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorInt16SignExtend(Vector<int> mask, short* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1sh_gather_[s64]index_s64(svbool_t pg, const int16_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt16SignExtend(Vector<long> mask, short* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1sh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt16SignExtend(Vector<long> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1sh_gather_[u64]index_s64(svbool_t pg, const int16_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt16SignExtend(Vector<long> mask, short* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1sh_gather_[s32]index_u32(svbool_t pg, const int16_t *base, svint32_t indices)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorInt16SignExtend(Vector<uint> mask, short* address, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1sh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorInt16SignExtend(Vector<uint> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1sh_gather_[u32]index_u32(svbool_t pg, const int16_t *base, svuint32_t indices)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorInt16SignExtend(Vector<uint> mask, short* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1sh_gather_[s64]index_u64(svbool_t pg, const int16_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt16SignExtend(Vector<ulong> mask, short* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1sh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt16SignExtend(Vector<ulong> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1sh_gather_[u64]index_u64(svbool_t pg, const int16_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt16SignExtend(Vector<ulong> mask, short* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
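+
+    // Semantics sketch (scalar model of the `index` forms above, not normative text):
+    // indices are element indices, scaled by the element size of the source data, so
+    // per active lane the gather computes
+    //
+    //     result[i] = (int)address[indices[i]];    // i.e. *(address + indices[i]), sign-extended
+    //
+    // Inactive lanes produce zero. The `WithByteOffsets` families further down take raw,
+    // unscaled byte offsets instead: *(short*)((byte*)address + offsets[i]).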
+
+
+    /// GatherVectorInt16SignExtendFirstFaulting : Load 16-bit data and sign-extend, first-faulting
+
+    /// <summary>
+    /// svint32_t svldff1sh_gather_[s32]index_s32(svbool_t pg, const int16_t *base, svint32_t indices)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorInt16SignExtendFirstFaulting(Vector<int> mask, short* address, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svldff1sh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorInt16SignExtendFirstFaulting(Vector<int> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svldff1sh_gather_[u32]index_s32(svbool_t pg, const int16_t *base, svuint32_t indices)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorInt16SignExtendFirstFaulting(Vector<int> mask, short* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1sh_gather_[s64]index_s64(svbool_t pg, const int16_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt16SignExtendFirstFaulting(Vector<long> mask, short* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1sh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt16SignExtendFirstFaulting(Vector<long> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1sh_gather_[u64]index_s64(svbool_t pg, const int16_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt16SignExtendFirstFaulting(Vector<long> mask, short* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1sh_gather_[s32]index_u32(svbool_t pg, const int16_t *base, svint32_t indices)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorInt16SignExtendFirstFaulting(Vector<uint> mask, short* address, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1sh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorInt16SignExtendFirstFaulting(Vector<uint> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1sh_gather_[u32]index_u32(svbool_t pg, const int16_t *base, svuint32_t indices)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorInt16SignExtendFirstFaulting(Vector<uint> mask, short* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sh_gather_[s64]index_u64(svbool_t pg, const int16_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt16SignExtendFirstFaulting(Vector<ulong> mask, short* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt16SignExtendFirstFaulting(Vector<ulong> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sh_gather_[u64]index_u64(svbool_t pg, const int16_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt16SignExtendFirstFaulting(Vector<ulong> mask, short* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
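+
+    // Scalar model (sketch; the helper below is hypothetical and only illustrates what
+    // the sign-extending form computes when no element faults):
+    //
+    //     static unsafe long[] GatherInt16SignExtendScalar(bool[] mask, short* address, long[] indices)
+    //     {
+    //         var result = new long[indices.Length];
+    //         for (int i = 0; i < indices.Length; i++)
+    //             result[i] = mask[i] ? (long)address[indices[i]] : 0;  // sign-extends 16 -> 64 bits
+    //         return result;
+    //     }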
+
+
+    /// GatherVectorInt16WithByteOffsetsSignExtend : Load 16-bit data and sign-extend
+
+    /// <summary>
+    /// svint32_t svld1sh_gather_[s32]offset_s32(svbool_t pg, const int16_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorInt16WithByteOffsetsSignExtend(Vector<int> mask, short* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svld1sh_gather_[u32]offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorInt16WithByteOffsetsSignExtend(Vector<int> mask, short* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1sh_gather_[s64]offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt16WithByteOffsetsSignExtend(Vector<long> mask, short* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1sh_gather_[u64]offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt16WithByteOffsetsSignExtend(Vector<long> mask, short* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1sh_gather_[s32]offset_u32(svbool_t pg, const int16_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorInt16WithByteOffsetsSignExtend(Vector<uint> mask, short* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1sh_gather_[u32]offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorInt16WithByteOffsetsSignExtend(Vector<uint> mask, short* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1sh_gather_[s64]offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtend(Vector<ulong> mask, short* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1sh_gather_[u64]offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtend(Vector<ulong> mask, short* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+
+    /// GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting : Load 16-bit data and sign-extend, first-faulting
+
+    /// <summary>
+    /// svint32_t svldff1sh_gather_[s32]offset_s32(svbool_t pg, const int16_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<int> mask, short* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svldff1sh_gather_[u32]offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<int> mask, short* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1sh_gather_[s64]offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<long> mask, short* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1sh_gather_[u64]offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<long> mask, short* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1sh_gather_[s32]offset_u32(svbool_t pg, const int16_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<uint> mask, short* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1sh_gather_[u32]offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<uint> mask, short* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sh_gather_[s64]offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<ulong> mask, short* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sh_gather_[u64]offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<ulong> mask, short* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
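+
+    // Note (illustrative): sign-extension replicates the sign bit into the widened lane,
+    // while the zero-extending (`UInt16...ZeroExtend`) families below fill with zeros.
+    // For example:
+    //
+    //     short s = unchecked((short)0x8000);
+    //     int signExtended = s;           // 0xFFFF8000
+    //     int zeroExtended = (ushort)s;   // 0x00008000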
+
+
+    /// GatherVectorInt32SignExtend : Load 32-bit data and sign-extend
+
+    /// <summary>
+    /// svint64_t svld1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt32SignExtend(Vector<long> mask, int* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt32SignExtend(Vector<long> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt32SignExtend(Vector<long> mask, int* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt32SignExtend(Vector<ulong> mask, int* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt32SignExtend(Vector<ulong> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt32SignExtend(Vector<ulong> mask, int* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+
+    /// GatherVectorInt32SignExtendFirstFaulting : Load 32-bit data and sign-extend, first-faulting
+
+    /// <summary>
+    /// svint64_t svldff1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt32SignExtendFirstFaulting(Vector<long> mask, int* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt32SignExtendFirstFaulting(Vector<long> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt32SignExtendFirstFaulting(Vector<long> mask, int* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt32SignExtendFirstFaulting(Vector<ulong> mask, int* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt32SignExtendFirstFaulting(Vector<ulong> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt32SignExtendFirstFaulting(Vector<ulong> mask, int* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+
+    /// GatherVectorInt32WithByteOffsetsSignExtend : Load 32-bit data and sign-extend
+
+    /// <summary>
+    /// svint64_t svld1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt32WithByteOffsetsSignExtend(Vector<long> mask, int* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt32WithByteOffsetsSignExtend(Vector<long> mask, int* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt32WithByteOffsetsSignExtend(Vector<ulong> mask, int* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt32WithByteOffsetsSignExtend(Vector<ulong> mask, int* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+
+    /// GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting : Load 32-bit data and sign-extend, first-faulting
+
+    /// <summary>
+    /// svint64_t svldff1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector<long> mask, int* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector<long> mask, int* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector<ulong> mask, int* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector<ulong> mask, int* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+
+    /// GatherVectorSByteSignExtend : Load 8-bit data and sign-extend
+
+    /// <summary>
+    /// svint32_t svld1sb_gather_[s32]offset_s32(svbool_t pg, const int8_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorSByteSignExtend(Vector<int> mask, sbyte* address, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svld1sb_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorSByteSignExtend(Vector<int> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svld1sb_gather_[u32]offset_s32(svbool_t pg, const int8_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorSByteSignExtend(Vector<int> mask, sbyte* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1sb_gather_[s64]offset_s64(svbool_t pg, const int8_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorSByteSignExtend(Vector<long> mask, sbyte* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1sb_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorSByteSignExtend(Vector<long> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1sb_gather_[u64]offset_s64(svbool_t pg, const int8_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorSByteSignExtend(Vector<long> mask, sbyte* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1sb_gather_[s32]offset_u32(svbool_t pg, const int8_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorSByteSignExtend(Vector<uint> mask, sbyte* address, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1sb_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorSByteSignExtend(Vector<uint> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1sb_gather_[u32]offset_u32(svbool_t pg, const int8_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorSByteSignExtend(Vector<uint> mask, sbyte* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1sb_gather_[s64]offset_u64(svbool_t pg, const int8_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorSByteSignExtend(Vector<ulong> mask, sbyte* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1sb_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorSByteSignExtend(Vector<ulong> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1sb_gather_[u64]offset_u64(svbool_t pg, const int8_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorSByteSignExtend(Vector<ulong> mask, sbyte* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+
+    /// GatherVectorSByteSignExtendFirstFaulting : Load 8-bit data and sign-extend, first-faulting
+
+    /// <summary>
+    /// svint32_t svldff1sb_gather_[s32]offset_s32(svbool_t pg, const int8_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorSByteSignExtendFirstFaulting(Vector<int> mask, sbyte* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svldff1sb_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorSByteSignExtendFirstFaulting(Vector<int> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svldff1sb_gather_[u32]offset_s32(svbool_t pg, const int8_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorSByteSignExtendFirstFaulting(Vector<int> mask, sbyte* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1sb_gather_[s64]offset_s64(svbool_t pg, const int8_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorSByteSignExtendFirstFaulting(Vector<long> mask, sbyte* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1sb_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorSByteSignExtendFirstFaulting(Vector<long> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1sb_gather_[u64]offset_s64(svbool_t pg, const int8_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorSByteSignExtendFirstFaulting(Vector<long> mask, sbyte* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1sb_gather_[s32]offset_u32(svbool_t pg, const int8_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorSByteSignExtendFirstFaulting(Vector<uint> mask, sbyte* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1sb_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorSByteSignExtendFirstFaulting(Vector<uint> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1sb_gather_[u32]offset_u32(svbool_t pg, const int8_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorSByteSignExtendFirstFaulting(Vector<uint> mask, sbyte* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sb_gather_[s64]offset_u64(svbool_t pg, const int8_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorSByteSignExtendFirstFaulting(Vector<ulong> mask, sbyte* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sb_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorSByteSignExtendFirstFaulting(Vector<ulong> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sb_gather_[u64]offset_u64(svbool_t pg, const int8_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorSByteSignExtendFirstFaulting(Vector<ulong> mask, sbyte* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+
+    /// GatherVectorUInt16WithByteOffsetsZeroExtend : Load 16-bit data and zero-extend
+
+    /// <summary>
+    /// svint32_t svld1uh_gather_[s32]offset_s32(svbool_t pg, const uint16_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<int> mask, ushort* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svld1uh_gather_[u32]offset_s32(svbool_t pg, const uint16_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<int> mask, ushort* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1uh_gather_[s64]offset_s64(svbool_t pg, const uint16_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<long> mask, ushort* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1uh_gather_[u64]offset_s64(svbool_t pg, const uint16_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<long> mask, ushort* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1uh_gather_[s32]offset_u32(svbool_t pg, const uint16_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<uint> mask, ushort* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1uh_gather_[u32]offset_u32(svbool_t pg, const uint16_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<uint> mask, ushort* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1uh_gather_[s64]offset_u64(svbool_t pg, const uint16_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<ulong> mask, ushort* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1uh_gather_[u64]offset_u64(svbool_t pg, const uint16_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<ulong> mask, ushort* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
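+
+    // Usage sketch (hypothetical layout; the record size and field offset below are
+    // invented for illustration): byte offsets make it easy to gather a field out of an
+    // array of structs without pre-scaling, e.g. a 16-byte record whose ushort field
+    // sits at byte offset 4:
+    //
+    //     // offsets[i] = i * 16 + 4, built once (e.g. with Sve.Multiply/Sve.Add) or precomputed
+    //     Vector<uint> values = Sve.GatherVectorUInt16WithByteOffsetsZeroExtend(mask, (ushort*)basePtr, offsets);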
+
+
+    /// GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting : Load 16-bit data and zero-extend, first-faulting
+
+    /// <summary>
+    /// svint32_t svldff1uh_gather_[s32]offset_s32(svbool_t pg, const uint16_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<int> mask, ushort* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svldff1uh_gather_[u32]offset_s32(svbool_t pg, const uint16_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<int> mask, ushort* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1uh_gather_[s64]offset_s64(svbool_t pg, const uint16_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<long> mask, ushort* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1uh_gather_[u64]offset_s64(svbool_t pg, const uint16_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<long> mask, ushort* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1uh_gather_[s32]offset_u32(svbool_t pg, const uint16_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<uint> mask, ushort* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1uh_gather_[u32]offset_u32(svbool_t pg, const uint16_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<uint> mask, ushort* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1uh_gather_[s64]offset_u64(svbool_t pg, const uint16_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<ulong> mask, ushort* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1uh_gather_[u64]offset_u64(svbool_t pg, const uint16_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<ulong> mask, ushort* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+
+    /// GatherVectorUInt16ZeroExtend : Load 16-bit data and zero-extend
+
+    /// <summary>
+    /// svint32_t svld1uh_gather_[s32]index_s32(svbool_t pg, const uint16_t *base, svint32_t indices)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorUInt16ZeroExtend(Vector<int> mask, ushort* address, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svld1uh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorUInt16ZeroExtend(Vector<int> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svld1uh_gather_[u32]index_s32(svbool_t pg, const uint16_t *base, svuint32_t indices)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorUInt16ZeroExtend(Vector<int> mask, ushort* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1uh_gather_[s64]index_s64(svbool_t pg, const uint16_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt16ZeroExtend(Vector<long> mask, ushort* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1uh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt16ZeroExtend(Vector<long> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1uh_gather_[u64]index_s64(svbool_t pg, const uint16_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt16ZeroExtend(Vector<long> mask, ushort* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1uh_gather_[s32]index_u32(svbool_t pg, const uint16_t *base, svint32_t indices)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorUInt16ZeroExtend(Vector<uint> mask, ushort* address, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1uh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorUInt16ZeroExtend(Vector<uint> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1uh_gather_[u32]index_u32(svbool_t pg, const uint16_t *base, svuint32_t indices)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorUInt16ZeroExtend(Vector<uint> mask, ushort* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1uh_gather_[s64]index_u64(svbool_t pg, const uint16_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtend(Vector<ulong> mask, ushort* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1uh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtend(Vector<ulong> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1uh_gather_[u64]index_u64(svbool_t pg, const uint16_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtend(Vector<ulong> mask, ushort* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+
+    /// GatherVectorUInt16ZeroExtendFirstFaulting : Load 16-bit data and zero-extend, first-faulting
+
+    /// <summary>
+    /// svint32_t svldff1uh_gather_[s32]index_s32(svbool_t pg, const uint16_t *base, svint32_t indices)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<int> mask, ushort* address, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svldff1uh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<int> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svldff1uh_gather_[u32]index_s32(svbool_t pg, const uint16_t *base, svuint32_t indices)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<int> mask, ushort* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1uh_gather_[s64]index_s64(svbool_t pg, const uint16_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<long> mask, ushort* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1uh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<long> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1uh_gather_[u64]index_s64(svbool_t pg, const uint16_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<long> mask, ushort* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1uh_gather_[s32]index_u32(svbool_t pg, const uint16_t *base, svint32_t indices)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<uint> mask, ushort* address, Vector<int> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1uh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<uint> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1uh_gather_[u32]index_u32(svbool_t pg, const uint16_t *base, svuint32_t indices)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<uint> mask, ushort* address, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1uh_gather_[s64]index_u64(svbool_t pg, const uint16_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<ulong> mask, ushort* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1uh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<ulong> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1uh_gather_[u64]index_u64(svbool_t pg, const uint16_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<ulong> mask, ushort* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+
+    /// GatherVectorUInt32WithByteOffsetsZeroExtend : Load 32-bit data and zero-extend
+
+    /// <summary>
+    /// svint64_t svld1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt32WithByteOffsetsZeroExtend(Vector<long> mask, uint* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt32WithByteOffsetsZeroExtend(Vector<long> mask, uint* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt32WithByteOffsetsZeroExtend(Vector<ulong> mask, uint* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt32WithByteOffsetsZeroExtend(Vector<ulong> mask, uint* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
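+
+    // Note (sketch of the rule, not normative text): extending gathers can only widen,
+    // so 8- and 16-bit sources appear with 32- and 64-bit results, while the 32-bit
+    // sources in this group widen only into 64-bit lanes -- hence only
+    // Vector<long>/Vector<ulong> overloads here. Illustrative call:
+    //
+    //     Vector<long> widened = Sve.GatherVectorUInt32ZeroExtend(maskS64, srcPtr, idx); // 32 -> 64: OK
+    //     // no Vector<int>-result overload exists: 32-bit data cannot "extend" to 32 bits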
+
+
+    /// GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting : Load 32-bit data and zero-extend, first-faulting
+
+    /// <summary>
+    /// svint64_t svldff1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector<long> mask, uint* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector<long> mask, uint* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector<ulong> mask, uint* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector<ulong> mask, uint* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+
+    /// GatherVectorUInt32ZeroExtend : Load 32-bit data and zero-extend
+
+    /// <summary>
+    /// svint64_t svld1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt32ZeroExtend(Vector<long> mask, uint* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt32ZeroExtend(Vector<long> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt32ZeroExtend(Vector<long> mask, uint* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtend(Vector<ulong> mask, uint* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtend(Vector<ulong> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtend(Vector<ulong> mask, uint* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+
+    /// GatherVectorUInt32ZeroExtendFirstFaulting : Load 32-bit data and zero-extend, first-faulting
+
+    /// <summary>
+    /// svint64_t svldff1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<long> mask, uint* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<long> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<long> mask, uint* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<ulong> mask, uint* address, Vector<long> indices) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<ulong> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<ulong> mask, uint* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+
+    /// GatherVectorWithByteOffsetFirstFaulting : Unextended load, first-faulting
+
+    /// <summary>
+    /// svint32_t svldff1_gather_[s32]offset[_s32](svbool_t pg, const int32_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorWithByteOffsetFirstFaulting(Vector<int> mask, int* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svldff1_gather_[u32]offset[_s32](svbool_t pg, const int32_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorWithByteOffsetFirstFaulting(Vector<int> mask, int* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1_gather_[s64]offset[_s64](svbool_t pg, const int64_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorWithByteOffsetFirstFaulting(Vector<long> mask, long* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1_gather_[u64]offset[_s64](svbool_t pg, const int64_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorWithByteOffsetFirstFaulting(Vector<long> mask, long* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1_gather_[s32]offset[_u32](svbool_t pg, const uint32_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorWithByteOffsetFirstFaulting(Vector<uint> mask, uint* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1_gather_[u32]offset[_u32](svbool_t pg, const uint32_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorWithByteOffsetFirstFaulting(Vector<uint> mask, uint* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1_gather_[s64]offset[_u64](svbool_t pg, const uint64_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorWithByteOffsetFirstFaulting(Vector<ulong> mask, ulong* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1_gather_[u64]offset[_u64](svbool_t pg, const uint64_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorWithByteOffsetFirstFaulting(Vector<ulong> mask, ulong* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32_t svldff1_gather_[s32]offset[_f32](svbool_t pg, const float32_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<float> GatherVectorWithByteOffsetFirstFaulting(Vector<float> mask, float* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32_t svldff1_gather_[u32]offset[_f32](svbool_t pg, const float32_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<float> GatherVectorWithByteOffsetFirstFaulting(Vector<float> mask, float* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svldff1_gather_[s64]offset[_f64](svbool_t pg, const float64_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<double> GatherVectorWithByteOffsetFirstFaulting(Vector<double> mask, double* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svldff1_gather_[u64]offset[_f64](svbool_t pg, const float64_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<double> GatherVectorWithByteOffsetFirstFaulting(Vector<double> mask, double* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
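+
+    // Contrast sketch (illustrative caller code, not part of the API surface): the plain
+    // gather below faults exactly like the equivalent scalar loads, while the
+    // FirstFaulting form above faults only if the *first* active element faults; later
+    // faulting lanes are suppressed and reported through the FFR:
+    //
+    //     Vector<int> eager = Sve.GatherVectorWithByteOffsets(mask, basePtr, offsets);              // may fault on any lane
+    //     Vector<int> probe = Sve.GatherVectorWithByteOffsetFirstFaulting(mask, basePtr, offsets);  // faults on first active lane only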
+
+
+    /// GatherVectorWithByteOffsets : Unextended load
+
+    /// <summary>
+    /// svint32_t svld1_gather_[s32]offset[_s32](svbool_t pg, const int32_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorWithByteOffsets(Vector<int> mask, int* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svld1_gather_[u32]offset[_s32](svbool_t pg, const int32_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorWithByteOffsets(Vector<int> mask, int* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1_gather_[s64]offset[_s64](svbool_t pg, const int64_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorWithByteOffsets(Vector<long> mask, long* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1_gather_[u64]offset[_s64](svbool_t pg, const int64_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorWithByteOffsets(Vector<long> mask, long* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1_gather_[s32]offset[_u32](svbool_t pg, const uint32_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorWithByteOffsets(Vector<uint> mask, uint* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1_gather_[u32]offset[_u32](svbool_t pg, const uint32_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorWithByteOffsets(Vector<uint> mask, uint* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1_gather_[s64]offset[_u64](svbool_t pg, const uint64_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorWithByteOffsets(Vector<ulong> mask, ulong* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1_gather_[u64]offset[_u64](svbool_t pg, const uint64_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorWithByteOffsets(Vector<ulong> mask, ulong* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32_t svld1_gather_[s32]offset[_f32](svbool_t pg, const float32_t *base, svint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<float> GatherVectorWithByteOffsets(Vector<float> mask, float* address, Vector<int> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32_t svld1_gather_[u32]offset[_f32](svbool_t pg, const float32_t *base, svuint32_t offsets)
+    /// </summary>
+    public static unsafe Vector<float> GatherVectorWithByteOffsets(Vector<float> mask, float* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svld1_gather_[s64]offset[_f64](svbool_t pg, const float64_t *base, svint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<double> GatherVectorWithByteOffsets(Vector<double> mask, double* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svld1_gather_[u64]offset[_f64](svbool_t pg, const float64_t *base, svuint64_t offsets)
+    /// </summary>
+    public static unsafe Vector<double> GatherVectorWithByteOffsets(Vector<double> mask, double* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
+
+
+    /// GetActiveElementCount : Count set predicate bits
+
+    /// <summary>
+    /// uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<sbyte> mask, Vector<sbyte> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b16(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<short> mask, Vector<short> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b32(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<int> mask, Vector<int> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b64(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<long> mask, Vector<long> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<byte> mask, Vector<byte> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b16(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<ushort> mask, Vector<ushort> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b32(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<uint> mask, Vector<uint> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b64(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<ulong> mask, Vector<ulong> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b32(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<float> mask, Vector<float> from) { throw new PlatformNotSupportedException(); }
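+
+    // Usage sketch (hypothetical loop tail; locals are invented for illustration):
+    // svcntp counts the lanes that are active in both predicates, which is handy for
+    // advancing an induction variable by however many elements a first-faulting load
+    // actually delivered:
+    //
+    //     Vector<int> ffr = Sve.GetFfr();                       // per the overloads below
+    //     ulong done = Sve.GetActiveElementCount(mask, ffr);    // lanes set in mask AND ffr
+    //     i += (long)done;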
+
+
+    /// GetActiveElementCount : Count set predicate bits
+
+    /// <summary>
+    /// uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<sbyte> mask, Vector<sbyte> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<short> mask, Vector<short> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<int> mask, Vector<int> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<long> mask, Vector<long> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<byte> mask, Vector<byte> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b16(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<ushort> mask, Vector<ushort> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b32(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<uint> mask, Vector<uint> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b64(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<ulong> mask, Vector<ulong> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<float> mask, Vector<float> from) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<double> mask, Vector<double> from) { throw new PlatformNotSupportedException(); }
+
+
+    /// GetFfr : Read FFR, returning predicate of successfully loaded elements
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<sbyte> GetFfr() { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<short> GetFfr() { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<int> GetFfr() { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<long> GetFfr() { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<byte> GetFfr() { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<ushort> GetFfr() { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<uint> GetFfr() { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<ulong> GetFfr() { throw new PlatformNotSupportedException(); }
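+
+    // Illustrative sketch (assumed pattern, based on the FFR semantics above):
+    // after a first-faulting load, GetFfr yields a predicate of the lanes that
+    // loaded before any fault, and GetActiveElementCount turns that predicate
+    // into a count. `mask` is assumed built elsewhere in this API:
+    //
+    //     Vector<int> data   = Sve.LoadVectorFirstFaulting(mask, address);
+    //     Vector<int> loaded = Sve.GetFfr();
+    //     ulong count        = Sve.GetActiveElementCount(loaded, loaded);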
+
+
+    /// InsertIntoShiftedVector : Insert scalar into shifted vector
+
+    /// <summary>
+    /// svint8_t svinsr[_n_s8](svint8_t op1, int8_t op2)
+    /// </summary>
+    public static unsafe Vector<sbyte> InsertIntoShiftedVector(Vector<sbyte> left, sbyte right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint16_t svinsr[_n_s16](svint16_t op1, int16_t op2)
+    /// </summary>
+    public static unsafe Vector<short> InsertIntoShiftedVector(Vector<short> left, short right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svinsr[_n_s32](svint32_t op1, int32_t op2)
+    /// </summary>
+    public static unsafe Vector<int> InsertIntoShiftedVector(Vector<int> left, int right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svinsr[_n_s64](svint64_t op1, int64_t op2)
+    /// </summary>
+    public static unsafe Vector<long> InsertIntoShiftedVector(Vector<long> left, long right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint8_t svinsr[_n_u8](svuint8_t op1, uint8_t op2)
+    /// </summary>
+    public static unsafe Vector<byte> InsertIntoShiftedVector(Vector<byte> left, byte right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16_t svinsr[_n_u16](svuint16_t op1, uint16_t op2)
+    /// </summary>
+    public static unsafe Vector<ushort> InsertIntoShiftedVector(Vector<ushort> left, ushort right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svinsr[_n_u32](svuint32_t op1, uint32_t op2)
+    /// </summary>
+    public static unsafe Vector<uint> InsertIntoShiftedVector(Vector<uint> left, uint right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svinsr[_n_u64](svuint64_t op1, uint64_t op2)
+    /// </summary>
+    public static unsafe Vector<ulong> InsertIntoShiftedVector(Vector<ulong> left, ulong right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32_t svinsr[_n_f32](svfloat32_t op1, float32_t op2)
+    /// </summary>
+    public static unsafe Vector<float> InsertIntoShiftedVector(Vector<float> left, float right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svinsr[_n_f64](svfloat64_t op1, float64_t op2)
+    /// </summary>
+    public static unsafe Vector<double> InsertIntoShiftedVector(Vector<double> left, double right) { throw new PlatformNotSupportedException(); }
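+
+    // Semantics sketch (illustrative): INSR shifts every element of `left` up
+    // by one lane and places the scalar in lane 0, so for vector length n:
+    //
+    //     // result[0] = right; result[i] = left[i - 1] for 0 < i < n
+    //     Vector<int> shifted = Sve.InsertIntoShiftedVector(left, 42);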
+
+
+    /// LeadingSignCount : Count leading sign bits
+
+    /// <summary>
+    /// svuint8_t svcls[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op)
+    /// svuint8_t svcls[_s8]_x(svbool_t pg, svint8_t op)
+    /// svuint8_t svcls[_s8]_z(svbool_t pg, svint8_t op)
+    /// </summary>
+    public static unsafe Vector<byte> LeadingSignCount(Vector<sbyte> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16_t svcls[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op)
+    /// svuint16_t svcls[_s16]_x(svbool_t pg, svint16_t op)
+    /// svuint16_t svcls[_s16]_z(svbool_t pg, svint16_t op)
+    /// </summary>
+    public static unsafe Vector<ushort> LeadingSignCount(Vector<short> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svcls[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op)
+    /// svuint32_t svcls[_s32]_x(svbool_t pg, svint32_t op)
+    /// svuint32_t svcls[_s32]_z(svbool_t pg, svint32_t op)
+    /// </summary>
+    public static unsafe Vector<uint> LeadingSignCount(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svcls[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op)
+    /// svuint64_t svcls[_s64]_x(svbool_t pg, svint64_t op)
+    /// svuint64_t svcls[_s64]_z(svbool_t pg, svint64_t op)
+    /// </summary>
+    public static unsafe Vector<ulong> LeadingSignCount(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+
+    /// LeadingZeroCount : Count leading zero bits
+
+    /// <summary>
+    /// svuint8_t svclz[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op)
+    /// svuint8_t svclz[_s8]_x(svbool_t pg, svint8_t op)
+    /// svuint8_t svclz[_s8]_z(svbool_t pg, svint8_t op)
+    /// </summary>
+    public static unsafe Vector<byte> LeadingZeroCount(Vector<sbyte> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint8_t svclz[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op)
+    /// svuint8_t svclz[_u8]_x(svbool_t pg, svuint8_t op)
+    /// svuint8_t svclz[_u8]_z(svbool_t pg, svuint8_t op)
+    /// </summary>
+    public static unsafe Vector<byte> LeadingZeroCount(Vector<byte> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16_t svclz[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op)
+    /// svuint16_t svclz[_s16]_x(svbool_t pg, svint16_t op)
+    /// svuint16_t svclz[_s16]_z(svbool_t pg, svint16_t op)
+    /// </summary>
+    public static unsafe Vector<ushort> LeadingZeroCount(Vector<short> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16_t svclz[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+    /// svuint16_t svclz[_u16]_x(svbool_t pg, svuint16_t op)
+    /// svuint16_t svclz[_u16]_z(svbool_t pg, svuint16_t op)
+    /// </summary>
+    public static unsafe Vector<ushort> LeadingZeroCount(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svclz[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op)
+    /// svuint32_t svclz[_s32]_x(svbool_t pg, svint32_t op)
+    /// svuint32_t svclz[_s32]_z(svbool_t pg, svint32_t op)
+    /// </summary>
+    public static unsafe Vector<uint> LeadingZeroCount(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svclz[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+    /// svuint32_t svclz[_u32]_x(svbool_t pg, svuint32_t op)
+    /// svuint32_t svclz[_u32]_z(svbool_t pg, svuint32_t op)
+    /// </summary>
+    public static unsafe Vector<uint> LeadingZeroCount(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svclz[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op)
+    /// svuint64_t svclz[_s64]_x(svbool_t pg, svint64_t op)
+    /// svuint64_t svclz[_s64]_z(svbool_t pg, svint64_t op)
+    /// </summary>
+    public static unsafe Vector<ulong> LeadingZeroCount(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svclz[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+    /// svuint64_t svclz[_u64]_x(svbool_t pg, svuint64_t op)
+    /// svuint64_t svclz[_u64]_z(svbool_t pg, svuint64_t op)
+    /// </summary>
+    public static unsafe Vector<ulong> LeadingZeroCount(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVector : Unextended load
+
+    /// <summary>
+    /// svint8_t svld1[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<sbyte> LoadVector(Vector<sbyte> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint16_t svld1[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVector(Vector<short> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svld1[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVector(Vector<int> mask, int* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVector(Vector<long> mask, long* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint8_t svld1[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<byte> LoadVector(Vector<byte> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16_t svld1[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVector(Vector<ushort> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVector(Vector<uint> mask, uint* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVector(Vector<ulong> mask, ulong* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32_t svld1[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe Vector<float> LoadVector(Vector<float> mask, float* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svld1[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe Vector<double> LoadVector(Vector<double> mask, double* address) { throw new PlatformNotSupportedException(); }
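+
+    // Illustrative usage sketch: LD1 is a predicated load, so lanes whose
+    // predicate bit is false are not read from memory and come back as zero.
+    // An all-true mask helper from elsewhere in this API is assumed:
+    //
+    //     Vector<float> v = Sve.LoadVector(mask, address);   // inactive lanes => 0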
+
+
+    /// LoadVector128AndReplicateToVector : Load and replicate 128 bits of data
+
+    /// <summary>
+    /// svint8_t svld1rq[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<sbyte> LoadVector128AndReplicateToVector(Vector<sbyte> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint16_t svld1rq[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVector128AndReplicateToVector(Vector<short> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svld1rq[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVector128AndReplicateToVector(Vector<int> mask, int* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svld1rq[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVector128AndReplicateToVector(Vector<long> mask, long* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint8_t svld1rq[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<byte> LoadVector128AndReplicateToVector(Vector<byte> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16_t svld1rq[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVector128AndReplicateToVector(Vector<ushort> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svld1rq[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVector128AndReplicateToVector(Vector<uint> mask, uint* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svld1rq[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVector128AndReplicateToVector(Vector<ulong> mask, ulong* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32_t svld1rq[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe Vector<float> LoadVector128AndReplicateToVector(Vector<float> mask, float* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svld1rq[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe Vector<double> LoadVector128AndReplicateToVector(Vector<double> mask, double* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorByteNonFaultingZeroExtendToInt16 : Load 8-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svint16_t svldnf1ub_s16(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorByteNonFaultingZeroExtendToInt16(byte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorByteNonFaultingZeroExtendToInt32 : Load 8-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svint32_t svldnf1ub_s32(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorByteNonFaultingZeroExtendToInt32(byte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorByteNonFaultingZeroExtendToInt64 : Load 8-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svint64_t svldnf1ub_s64(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorByteNonFaultingZeroExtendToInt64(byte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorByteNonFaultingZeroExtendToUInt16 : Load 8-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svuint16_t svldnf1ub_u16(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorByteNonFaultingZeroExtendToUInt16(byte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorByteNonFaultingZeroExtendToUInt32 : Load 8-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svuint32_t svldnf1ub_u32(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorByteNonFaultingZeroExtendToUInt32(byte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorByteNonFaultingZeroExtendToUInt64 : Load 8-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svuint64_t svldnf1ub_u64(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorByteNonFaultingZeroExtendToUInt64(byte* address) { throw new PlatformNotSupportedException(); }
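+
+    // Note (sketch): the LDNF1 forms above never raise a memory fault; lanes
+    // that could not be read come back zeroed and the FFR is updated instead.
+    // Consult GetFfr (above) to learn which lanes are valid, e.g.
+    //
+    //     Vector<short> widened = Sve.LoadVectorByteNonFaultingZeroExtendToInt16(bytePtr);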
+
+
+    /// LoadVectorByteZeroExtendFirstFaulting : Load 8-bit data and zero-extend, first-faulting
+
+    /// <summary>
+    /// svint16_t svldff1ub_s16(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorByteZeroExtendFirstFaulting(Vector<short> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svldff1ub_s32(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorByteZeroExtendFirstFaulting(Vector<int> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1ub_s64(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorByteZeroExtendFirstFaulting(Vector<long> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16_t svldff1ub_u16(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorByteZeroExtendFirstFaulting(Vector<ushort> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1ub_u32(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorByteZeroExtendFirstFaulting(Vector<uint> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1ub_u64(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorByteZeroExtendFirstFaulting(Vector<ulong> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorByteZeroExtendToInt16 : Load 8-bit data and zero-extend
+
+    /// <summary>
+    /// svint16_t svld1ub_s16(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorByteZeroExtendToInt16(Vector<short> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorByteZeroExtendToInt32 : Load 8-bit data and zero-extend
+
+    /// <summary>
+    /// svint32_t svld1ub_s32(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorByteZeroExtendToInt32(Vector<int> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorByteZeroExtendToInt64 : Load 8-bit data and zero-extend
+
+    /// <summary>
+    /// svint64_t svld1ub_s64(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorByteZeroExtendToInt64(Vector<long> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorByteZeroExtendToUInt16 : Load 8-bit data and zero-extend
+
+    /// <summary>
+    /// svuint16_t svld1ub_u16(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorByteZeroExtendToUInt16(Vector<ushort> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorByteZeroExtendToUInt32 : Load 8-bit data and zero-extend
+
+    /// <summary>
+    /// svuint32_t svld1ub_u32(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorByteZeroExtendToUInt32(Vector<uint> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorByteZeroExtendToUInt64 : Load 8-bit data and zero-extend
+
+    /// <summary>
+    /// svuint64_t svld1ub_u64(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorByteZeroExtendToUInt64(Vector<ulong> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorFirstFaulting : Unextended load, first-faulting
+
+    /// <summary>
+    /// svint8_t svldff1[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<sbyte> LoadVectorFirstFaulting(Vector<sbyte> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint16_t svldff1[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorFirstFaulting(Vector<short> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svldff1[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorFirstFaulting(Vector<int> mask, int* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorFirstFaulting(Vector<long> mask, long* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint8_t svldff1[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<byte> LoadVectorFirstFaulting(Vector<byte> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16_t svldff1[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorFirstFaulting(Vector<ushort> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorFirstFaulting(Vector<uint> mask, uint* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorFirstFaulting(Vector<ulong> mask, ulong* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32_t svldff1[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe Vector<float> LoadVectorFirstFaulting(Vector<float> mask, float* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svldff1[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe Vector<double> LoadVectorFirstFaulting(Vector<double> mask, double* address) { throw new PlatformNotSupportedException(); }
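+
+    // Illustrative loop sketch: first-faulting loads let a loop walk memory of
+    // unknown extent (e.g. a C string) without reading past an unmapped page.
+    // Helper names (SetFfr, an all-true mask) are assumed from elsewhere in
+    // this API surface:
+    //
+    //     while (...)
+    //     {
+    //         Sve.SetFfr(trueMask);                               // reset the FFR
+    //         Vector<byte> chunk = Sve.LoadVectorFirstFaulting(trueMask, p);
+    //         Vector<byte> valid = Sve.GetFfr();                  // lanes that really loaded
+    //         ...                                                 // process valid lanes, advance p
+    //     }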
+
+
+    /// LoadVectorInt16NonFaultingSignExtendToInt32 : Load 16-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svint32_t svldnf1sh_s32(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorInt16NonFaultingSignExtendToInt32(short* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorInt16NonFaultingSignExtendToInt64 : Load 16-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svint64_t svldnf1sh_s64(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorInt16NonFaultingSignExtendToInt64(short* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorInt16NonFaultingSignExtendToUInt32 : Load 16-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svuint32_t svldnf1sh_u32(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorInt16NonFaultingSignExtendToUInt32(short* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorInt16NonFaultingSignExtendToUInt64 : Load 16-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svuint64_t svldnf1sh_u64(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorInt16NonFaultingSignExtendToUInt64(short* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorInt16SignExtendFirstFaulting : Load 16-bit data and sign-extend, first-faulting
+
+    /// <summary>
+    /// svint32_t svldff1sh_s32(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorInt16SignExtendFirstFaulting(Vector<int> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1sh_s64(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorInt16SignExtendFirstFaulting(Vector<long> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1sh_u32(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorInt16SignExtendFirstFaulting(Vector<uint> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sh_u64(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorInt16SignExtendFirstFaulting(Vector<ulong> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorInt16SignExtendToInt32 : Load 16-bit data and sign-extend
+
+    /// <summary>
+    /// svint32_t svld1sh_s32(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorInt16SignExtendToInt32(Vector<int> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorInt16SignExtendToInt64 : Load 16-bit data and sign-extend
+
+    /// <summary>
+    /// svint64_t svld1sh_s64(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorInt16SignExtendToInt64(Vector<long> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorInt16SignExtendToUInt32 : Load 16-bit data and sign-extend
+
+    /// <summary>
+    /// svuint32_t svld1sh_u32(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorInt16SignExtendToUInt32(Vector<uint> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorInt16SignExtendToUInt64 : Load 16-bit data and sign-extend
+
+    /// <summary>
+    /// svuint64_t svld1sh_u64(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorInt16SignExtendToUInt64(Vector<ulong> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorInt32NonFaultingSignExtendToInt64 : Load 32-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svint64_t svldnf1sw_s64(svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorInt32NonFaultingSignExtendToInt64(int* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorInt32NonFaultingSignExtendToUInt64 : Load 32-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svuint64_t svldnf1sw_u64(svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorInt32NonFaultingSignExtendToUInt64(int* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorInt32SignExtendFirstFaulting : Load 32-bit data and sign-extend, first-faulting
+
+    /// <summary>
+    /// svint64_t svldff1sw_s64(svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorInt32SignExtendFirstFaulting(Vector<long> mask, int* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sw_u64(svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorInt32SignExtendFirstFaulting(Vector<ulong> mask, int* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorInt32SignExtendToInt64 : Load 32-bit data and sign-extend
+
+    /// <summary>
+    /// svint64_t svld1sw_s64(svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorInt32SignExtendToInt64(Vector<long> mask, int* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorInt32SignExtendToUInt64 : Load 32-bit data and sign-extend
+
+    /// <summary>
+    /// svuint64_t svld1sw_u64(svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorInt32SignExtendToUInt64(Vector<ulong> mask, int* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorNonFaulting : Unextended load, non-faulting
+
+    /// <summary>
+    /// svint8_t svldnf1[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<sbyte> LoadVectorNonFaulting(sbyte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint16_t svldnf1[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorNonFaulting(short* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svldnf1[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorNonFaulting(int* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldnf1[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorNonFaulting(long* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint8_t svldnf1[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<byte> LoadVectorNonFaulting(byte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16_t svldnf1[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorNonFaulting(ushort* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldnf1[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorNonFaulting(uint* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldnf1[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorNonFaulting(ulong* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32_t svldnf1[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe Vector<float> LoadVectorNonFaulting(float* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svldnf1[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe Vector<double> LoadVectorNonFaulting(double* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorNonTemporal : Unextended load, non-temporal
+
+    /// <summary>
+    /// svint8_t svldnt1[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<sbyte> LoadVectorNonTemporal(Vector<sbyte> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint16_t svldnt1[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorNonTemporal(Vector<short> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svldnt1[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorNonTemporal(Vector<int> mask, int* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldnt1[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorNonTemporal(Vector<long> mask, long* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint8_t svldnt1[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<byte> LoadVectorNonTemporal(Vector<byte> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16_t svldnt1[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorNonTemporal(Vector<ushort> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldnt1[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorNonTemporal(Vector<uint> mask, uint* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldnt1[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorNonTemporal(Vector<ulong> mask, ulong* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32_t svldnt1[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe Vector<float> LoadVectorNonTemporal(Vector<float> mask, float* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svldnt1[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe Vector<double> LoadVectorNonTemporal(Vector<double> mask, double* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorSByteNonFaultingSignExtendToInt16 : Load 8-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svint16_t svldnf1sb_s16(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorSByteNonFaultingSignExtendToInt16(sbyte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorSByteNonFaultingSignExtendToInt32 : Load 8-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svint32_t svldnf1sb_s32(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorSByteNonFaultingSignExtendToInt32(sbyte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorSByteNonFaultingSignExtendToInt64 : Load 8-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svint64_t svldnf1sb_s64(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorSByteNonFaultingSignExtendToInt64(sbyte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorSByteNonFaultingSignExtendToUInt16 : Load 8-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svuint16_t svldnf1sb_u16(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorSByteNonFaultingSignExtendToUInt16(sbyte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorSByteNonFaultingSignExtendToUInt32 : Load 8-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svuint32_t svldnf1sb_u32(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorSByteNonFaultingSignExtendToUInt32(sbyte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorSByteNonFaultingSignExtendToUInt64 : Load 8-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svuint64_t svldnf1sb_u64(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorSByteNonFaultingSignExtendToUInt64(sbyte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorSByteSignExtendFirstFaulting : Load 8-bit data and sign-extend, first-faulting
+
+    /// <summary>
+    /// svint16_t svldff1sb_s16(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorSByteSignExtendFirstFaulting(Vector<short> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svldff1sb_s32(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorSByteSignExtendFirstFaulting(Vector<int> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1sb_s64(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorSByteSignExtendFirstFaulting(Vector<long> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16_t svldff1sb_u16(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorSByteSignExtendFirstFaulting(Vector<ushort> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1sb_u32(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorSByteSignExtendFirstFaulting(Vector<uint> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1sb_u64(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorSByteSignExtendFirstFaulting(Vector<ulong> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorSByteSignExtendToInt16 : Load 8-bit data and sign-extend
+
+    /// <summary>
+    /// svint16_t svld1sb_s16(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorSByteSignExtendToInt16(Vector<short> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorSByteSignExtendToInt32 : Load 8-bit data and sign-extend
+
+    /// <summary>
+    /// svint32_t svld1sb_s32(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorSByteSignExtendToInt32(Vector<int> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorSByteSignExtendToInt64 : Load 8-bit data and sign-extend
+
+    /// <summary>
+    /// svint64_t svld1sb_s64(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorSByteSignExtendToInt64(Vector<long> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorSByteSignExtendToUInt16 : Load 8-bit data and sign-extend
+
+    /// <summary>
+    /// svuint16_t svld1sb_u16(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorSByteSignExtendToUInt16(Vector<ushort> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorSByteSignExtendToUInt32 : Load 8-bit data and sign-extend
+
+    /// <summary>
+    /// svuint32_t svld1sb_u32(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorSByteSignExtendToUInt32(Vector<uint> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorSByteSignExtendToUInt64 : Load 8-bit data and sign-extend
+
+    /// <summary>
+    /// svuint64_t svld1sb_u64(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorSByteSignExtendToUInt64(Vector<ulong> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorUInt16NonFaultingZeroExtendToInt32 : Load 16-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svint32_t svldnf1uh_s32(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorUInt16NonFaultingZeroExtendToInt32(ushort* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorUInt16NonFaultingZeroExtendToInt64 : Load 16-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svint64_t svldnf1uh_s64(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorUInt16NonFaultingZeroExtendToInt64(ushort* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorUInt16NonFaultingZeroExtendToUInt32 : Load 16-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svuint32_t svldnf1uh_u32(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorUInt16NonFaultingZeroExtendToUInt32(ushort* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorUInt16NonFaultingZeroExtendToUInt64 : Load 16-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svuint64_t svldnf1uh_u64(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorUInt16NonFaultingZeroExtendToUInt64(ushort* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorUInt16ZeroExtendFirstFaulting : Load 16-bit data and zero-extend, first-faulting
+
+    /// <summary>
+    /// svint32_t svldff1uh_s32(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorUInt16ZeroExtendFirstFaulting(Vector<int> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svldff1uh_s64(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorUInt16ZeroExtendFirstFaulting(Vector<long> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svldff1uh_u32(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorUInt16ZeroExtendFirstFaulting(Vector<uint> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1uh_u64(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorUInt16ZeroExtendFirstFaulting(Vector<ulong> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorUInt16ZeroExtendToInt32 : Load 16-bit data and zero-extend
+
+    /// <summary>
+    /// svint32_t svld1uh_s32(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorUInt16ZeroExtendToInt32(Vector<int> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorUInt16ZeroExtendToInt64 : Load 16-bit data and zero-extend
+
+    /// <summary>
+    /// svint64_t svld1uh_s64(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorUInt16ZeroExtendToInt64(Vector<long> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorUInt16ZeroExtendToUInt32 : Load 16-bit data and zero-extend
+
+    /// <summary>
+    /// svuint32_t svld1uh_u32(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorUInt16ZeroExtendToUInt32(Vector<uint> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorUInt16ZeroExtendToUInt64 : Load 16-bit data and zero-extend
+
+    /// <summary>
+    /// svuint64_t svld1uh_u64(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorUInt16ZeroExtendToUInt64(Vector<ulong> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorUInt32NonFaultingZeroExtendToInt64 : Load 32-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svint64_t svldnf1uw_s64(svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorUInt32NonFaultingZeroExtendToInt64(uint* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorUInt32NonFaultingZeroExtendToUInt64 : Load 32-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svuint64_t svldnf1uw_u64(svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorUInt32NonFaultingZeroExtendToUInt64(uint* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorUInt32ZeroExtendFirstFaulting : Load 32-bit data and zero-extend, first-faulting
+
+    /// <summary>
+    /// svint64_t svldff1uw_s64(svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorUInt32ZeroExtendFirstFaulting(Vector<long> mask, uint* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svldff1uw_u64(svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorUInt32ZeroExtendFirstFaulting(Vector<ulong> mask, uint* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorUInt32ZeroExtendToInt64 : Load 32-bit data and zero-extend
+
+    /// <summary>
+    /// svint64_t svld1uw_s64(svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorUInt32ZeroExtendToInt64(Vector<long> mask, uint* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorUInt32ZeroExtendToUInt64 : Load 32-bit data and zero-extend
+
+    /// <summary>
+    /// svuint64_t svld1uw_u64(svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorUInt32ZeroExtendToUInt64(Vector<ulong> mask, uint* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorx2 : Load two-element tuples into two vectors
+
+    /// <summary>
+    /// svint8x2_t svld2[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe (Vector<sbyte>, Vector<sbyte>) LoadVectorx2(Vector<sbyte> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint16x2_t svld2[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe (Vector<short>, Vector<short>) LoadVectorx2(Vector<short> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32x2_t svld2[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe (Vector<int>, Vector<int>) LoadVectorx2(Vector<int> mask, int* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64x2_t svld2[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe (Vector<long>, Vector<long>) LoadVectorx2(Vector<long> mask, long* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint8x2_t svld2[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe (Vector<byte>, Vector<byte>) LoadVectorx2(Vector<byte> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16x2_t svld2[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe (Vector<ushort>, Vector<ushort>) LoadVectorx2(Vector<ushort> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32x2_t svld2[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe (Vector<uint>, Vector<uint>) LoadVectorx2(Vector<uint> mask, uint* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64x2_t svld2[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe (Vector<ulong>, Vector<ulong>) LoadVectorx2(Vector<ulong> mask, ulong* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32x2_t svld2[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe (Vector<float>, Vector<float>) LoadVectorx2(Vector<float> mask, float* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64x2_t svld2[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe (Vector<double>, Vector<double>) LoadVectorx2(Vector<double> mask, double* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorx3 : Load three-element tuples into three vectors
+
+    /// <summary>
+    /// svint8x3_t svld3[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe (Vector<sbyte>, Vector<sbyte>, Vector<sbyte>) LoadVectorx3(Vector<sbyte> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint16x3_t svld3[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe (Vector<short>, Vector<short>, Vector<short>) LoadVectorx3(Vector<short> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32x3_t svld3[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe (Vector<int>, Vector<int>, Vector<int>) LoadVectorx3(Vector<int> mask, int* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64x3_t svld3[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe (Vector<long>, Vector<long>, Vector<long>) LoadVectorx3(Vector<long> mask, long* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint8x3_t svld3[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe (Vector<byte>, Vector<byte>, Vector<byte>) LoadVectorx3(Vector<byte> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16x3_t svld3[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe (Vector<ushort>, Vector<ushort>, Vector<ushort>) LoadVectorx3(Vector<ushort> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32x3_t svld3[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe (Vector<uint>, Vector<uint>, Vector<uint>) LoadVectorx3(Vector<uint> mask, uint* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64x3_t svld3[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe (Vector<ulong>, Vector<ulong>, Vector<ulong>) LoadVectorx3(Vector<ulong> mask, ulong* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32x3_t svld3[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe (Vector<float>, Vector<float>, Vector<float>) LoadVectorx3(Vector<float> mask, float* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64x3_t svld3[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe (Vector<double>, Vector<double>, Vector<double>) LoadVectorx3(Vector<double> mask, double* address) { throw new PlatformNotSupportedException(); }
+
+
+    /// LoadVectorx4 : Load four-element tuples into four vectors
+
+    /// <summary>
+    /// svint8x4_t svld4[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe (Vector<sbyte>, Vector<sbyte>, Vector<sbyte>, Vector<sbyte>) LoadVectorx4(Vector<sbyte> mask, sbyte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint16x4_t svld4[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe (Vector<short>, Vector<short>, Vector<short>, Vector<short>) LoadVectorx4(Vector<short> mask, short* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32x4_t svld4[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe (Vector<int>, Vector<int>, Vector<int>, Vector<int>) LoadVectorx4(Vector<int> mask, int* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64x4_t svld4[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe (Vector<long>, Vector<long>, Vector<long>, Vector<long>) LoadVectorx4(Vector<long> mask, long* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint8x4_t svld4[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe (Vector<byte>, Vector<byte>, Vector<byte>, Vector<byte>) LoadVectorx4(Vector<byte> mask, byte* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16x4_t svld4[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe (Vector<ushort>, Vector<ushort>, Vector<ushort>, Vector<ushort>) LoadVectorx4(Vector<ushort> mask, ushort* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32x4_t svld4[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe (Vector<uint>, Vector<uint>, Vector<uint>, Vector<uint>) LoadVectorx4(Vector<uint> mask, uint* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64x4_t svld4[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe (Vector<ulong>, Vector<ulong>, Vector<ulong>, Vector<ulong>) LoadVectorx4(Vector<ulong> mask, ulong* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32x4_t svld4[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe (Vector<float>, Vector<float>, Vector<float>, Vector<float>) LoadVectorx4(Vector<float> mask, float* address) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64x4_t svld4[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe (Vector<double>, Vector<double>, Vector<double>, Vector<double>) LoadVectorx4(Vector<double> mask, double* address) { throw new PlatformNotSupportedException(); }
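+
+    // Illustrative usage sketch: LD2/LD3/LD4 de-interleave structured data as
+    // they load. For an interleaved (real, imag) float stream, LoadVectorx2
+    // splits the pairs into two vectors:
+    //
+    //     (Vector<float> re, Vector<float> im) = Sve.LoadVectorx2(mask, interleavedPtr);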
+
+
+    /// Max : Maximum
+
+    /// <summary>
+    /// svint8_t svmax[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// svint8_t svmax[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// svint8_t svmax[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// </summary>
+    public static unsafe Vector<sbyte> Max(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint16_t svmax[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// svint16_t svmax[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// svint16_t svmax[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// </summary>
+    public static unsafe Vector<short> Max(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svmax[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// svint32_t svmax[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// svint32_t svmax[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// </summary>
+    public static unsafe Vector<int> Max(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svmax[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// svint64_t svmax[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// svint64_t svmax[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// </summary>
+    public static unsafe Vector<long> Max(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint8_t svmax[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// svuint8_t svmax[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// svuint8_t svmax[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// </summary>
+    public static unsafe Vector<byte> Max(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16_t svmax[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// svuint16_t svmax[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// svuint16_t svmax[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// </summary>
+    public static unsafe Vector<ushort> Max(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svmax[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// svuint32_t svmax[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// svuint32_t svmax[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// </summary>
+    public static unsafe Vector<uint> Max(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svmax[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// svuint64_t svmax[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// svuint64_t svmax[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// </summary>
+    public static unsafe Vector<ulong> Max(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32_t svmax[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svmax[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svmax[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// </summary>
+    public static unsafe Vector<float> Max(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svmax[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svmax[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svmax[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// </summary>
+    public static unsafe Vector<double> Max(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+
+    /// MaxAcross : Maximum reduction to scalar
+
+    /// <summary>
+    /// int8_t svmaxv[_s8](svbool_t pg, svint8_t op)
+    /// </summary>
+    public static unsafe Vector<sbyte> MaxAcross(Vector<sbyte> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// int16_t svmaxv[_s16](svbool_t pg, svint16_t op)
+    /// </summary>
+    public static unsafe Vector<short> MaxAcross(Vector<short> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// int32_t svmaxv[_s32](svbool_t pg, svint32_t op)
+    /// </summary>
+    public static unsafe Vector<int> MaxAcross(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// int64_t svmaxv[_s64](svbool_t pg, svint64_t op)
+    /// </summary>
+    public static unsafe Vector<long> MaxAcross(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint8_t svmaxv[_u8](svbool_t pg, svuint8_t op)
+    /// </summary>
+    public static unsafe Vector<byte> MaxAcross(Vector<byte> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint16_t svmaxv[_u16](svbool_t pg, svuint16_t op)
+    /// </summary>
+    public static unsafe Vector<ushort> MaxAcross(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint32_t svmaxv[_u32](svbool_t pg, svuint32_t op)
+    /// </summary>
+    public static unsafe Vector<uint> MaxAcross(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svmaxv[_u64](svbool_t pg, svuint64_t op)
+    /// </summary>
+    public static unsafe Vector<ulong> MaxAcross(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// float32_t svmaxv[_f32](svbool_t pg, svfloat32_t op)
+    /// </summary>
+    public static unsafe Vector<float> MaxAcross(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// float64_t svmaxv[_f64](svbool_t pg, svfloat64_t op)
+    /// </summary>
+    public static unsafe Vector<double> MaxAcross(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+
+    /// MaxNumber : Maximum number
+
+    /// <summary>
+    /// svfloat32_t svmaxnm[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svmaxnm[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svmaxnm[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// </summary>
+    public static unsafe Vector<float> MaxNumber(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svmaxnm[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svmaxnm[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svmaxnm[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// </summary>
+    public static unsafe Vector<double> MaxNumber(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+
+    /// MaxNumberAcross : Maximum number reduction to scalar
+
+    /// <summary>
+    /// float32_t svmaxnmv[_f32](svbool_t pg, svfloat32_t op)
+    /// </summary>
+    public static unsafe Vector<float> MaxNumberAcross(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// float64_t svmaxnmv[_f64](svbool_t pg, svfloat64_t op)
+    /// </summary>
+    public static unsafe Vector<double> MaxNumberAcross(Vector<double> value) { throw new PlatformNotSupportedException(); }
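+
+    // Note (sketch): Max/MaxAcross follow FMAX semantics, while the "Number"
+    // variants follow FMAXNM: if exactly one operand of a lane is NaN, the
+    // other operand is returned rather than NaN, e.g.
+    //
+    //     Vector<float> m = Sve.MaxNumber(withNaNs, defaults);   // NaN lanes take `defaults`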
+
+
+    /// Min : Minimum
+
+    /// <summary>
+    /// svint8_t svmin[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// svint8_t svmin[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// svint8_t svmin[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// </summary>
+    public static unsafe Vector<sbyte> Min(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint16_t svmin[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// svint16_t svmin[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// svint16_t svmin[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// </summary>
+    public static unsafe Vector<short> Min(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint32_t svmin[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// svint32_t svmin[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// svint32_t svmin[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// </summary>
+    public static unsafe Vector<int> Min(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svint64_t svmin[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// svint64_t svmin[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// svint64_t svmin[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// </summary>
+    public static unsafe Vector<long> Min(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint8_t svmin[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// svuint8_t svmin[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// svuint8_t svmin[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// </summary>
+    public static unsafe Vector<byte> Min(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint16_t svmin[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// svuint16_t svmin[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// svuint16_t svmin[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// </summary>
+    public static unsafe Vector<ushort> Min(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint32_t svmin[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// svuint32_t svmin[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// svuint32_t svmin[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// </summary>
+    public static unsafe Vector<uint> Min(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svuint64_t svmin[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// svuint64_t svmin[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// svuint64_t svmin[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// </summary>
+    public static unsafe Vector<ulong> Min(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat32_t svmin[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svmin[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svmin[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// </summary>
+    public static unsafe Vector<float> Min(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svmin[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svmin[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svmin[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// </summary>
+    public static unsafe Vector<double> Min(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+
+    /// MinAcross : Minimum reduction to scalar
+
+    /// <summary>
+    /// int8_t svminv[_s8](svbool_t pg, svint8_t op)
+    /// </summary>
+    public static unsafe Vector<sbyte> MinAcross(Vector<sbyte> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// int16_t svminv[_s16](svbool_t pg, svint16_t op)
+    /// </summary>
+    public static unsafe Vector<short> MinAcross(Vector<short> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// int32_t svminv[_s32](svbool_t pg, svint32_t op)
+    /// </summary>
+    public static unsafe Vector<int> MinAcross(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// int64_t svminv[_s64](svbool_t pg, svint64_t op)
+    /// </summary>
+    public static unsafe Vector<long> MinAcross(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint8_t svminv[_u8](svbool_t pg, svuint8_t op)
+    /// </summary>
+    public static unsafe Vector<byte> MinAcross(Vector<byte> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint16_t svminv[_u16](svbool_t pg, svuint16_t op)
+    /// </summary>
+    public static unsafe Vector<ushort> MinAcross(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint32_t svminv[_u32](svbool_t pg, svuint32_t op)
+    /// </summary>
+    public static unsafe Vector<uint> MinAcross(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// uint64_t svminv[_u64](svbool_t pg, svuint64_t op)
+    /// </summary>
+    public static unsafe Vector<ulong> MinAcross(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// float32_t svminv[_f32](svbool_t pg, svfloat32_t op)
+    /// </summary>
+    public static unsafe Vector<float> MinAcross(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// float64_t svminv[_f64](svbool_t pg, svfloat64_t op)
+    /// </summary>
+    public static unsafe Vector<double> MinAcross(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+
+    /// MinNumber : Minimum number
+
+    /// <summary>
+    /// svfloat32_t svminnm[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svminnm[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svminnm[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// </summary>
+    public static unsafe Vector<float> MinNumber(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// svfloat64_t svminnm[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svminnm[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svminnm[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// </summary>
+    public static unsafe Vector<double> MinNumber(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+
+    /// MinNumberAcross : Minimum number reduction to scalar
+
+    /// <summary>
+    /// float32_t svminnmv[_f32](svbool_t pg, svfloat32_t op)
+    /// </summary>
+    public static unsafe Vector<float> MinNumberAcross(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+    /// <summary>
+    /// float64_t svminnmv[_f64](svbool_t pg, svfloat64_t op)
+    /// </summary>
+    public static unsafe Vector<double> MinNumberAcross(Vector<double> value) { throw new PlatformNotSupportedException(); }
+ /// Multiply : Multiply
+
+ /// <summary>
+ /// svint8_t svmul[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svmul[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svmul[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> Multiply(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svmul[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svmul[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svmul[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> Multiply(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svmul[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svmul[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svmul[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> Multiply(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svmul[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svmul[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svmul[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> Multiply(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svmul[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svmul[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svmul[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> Multiply(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svmul[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svmul[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svmul[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> Multiply(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svmul[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svmul[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svmul[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> Multiply(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svmul[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svmul[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svmul[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> Multiply(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat32_t svmul[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// svfloat32_t svmul[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// svfloat32_t svmul[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> Multiply(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svmul[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// svfloat64_t svmul[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// svfloat64_t svmul[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> Multiply(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+ /// MultiplyAdd : Multiply-add, addend first
+
+ /// <summary>
+ /// svint8_t svmla[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ /// svint8_t svmla[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ /// svint8_t svmla[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ /// </summary>
+ public static unsafe Vector<sbyte> MultiplyAdd(Vector<sbyte> addend, Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svmla[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ /// svint16_t svmla[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ /// svint16_t svmla[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ /// </summary>
+ public static unsafe Vector<short> MultiplyAdd(Vector<short> addend, Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svmla[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
+ /// svint32_t svmla[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
+ /// svint32_t svmla[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
+ /// </summary>
+ public static unsafe Vector<int> MultiplyAdd(Vector<int> addend, Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svmla[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
+ /// svint64_t svmla[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
+ /// svint64_t svmla[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
+ /// </summary>
+ public static unsafe Vector<long> MultiplyAdd(Vector<long> addend, Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svmla[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
+ /// svuint8_t svmla[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
+ /// svuint8_t svmla[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
+ /// </summary>
+ public static unsafe Vector<byte> MultiplyAdd(Vector<byte> addend, Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svmla[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
+ /// svuint16_t svmla[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
+ /// svuint16_t svmla[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
+ /// </summary>
+ public static unsafe Vector<ushort> MultiplyAdd(Vector<ushort> addend, Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svmla[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
+ /// svuint32_t svmla[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
+ /// svuint32_t svmla[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplyAdd(Vector<uint> addend, Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svmla[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
+ /// svuint64_t svmla[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
+ /// svuint64_t svmla[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplyAdd(Vector<ulong> addend, Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
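+ // --- Editorial sketch (illustrative helper, not generated): MultiplyAdd computes
+ // addend + (left * right) per element, mapping onto a single predicated MLA.
+ //     Vector<int> MulAcc(Vector<int> acc, Vector<int> x, Vector<int> y) =>
+ //         Sve.IsSupported ? Sve.MultiplyAdd(acc, x, y)
+ //                         : acc + (x * y);    // same arithmetic, element-wise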
+ /// MultiplyAddRotateComplex : Complex multiply-add with rotate
+
+ /// <summary>
+ /// svfloat32_t svcmla[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation)
+ /// svfloat32_t svcmla[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation)
+ /// svfloat32_t svcmla[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<float> MultiplyAddRotateComplex(Vector<float> addend, Vector<float> left, Vector<float> right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svcmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation)
+ /// svfloat64_t svcmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation)
+ /// svfloat64_t svcmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<double> MultiplyAddRotateComplex(Vector<double> addend, Vector<double> left, Vector<double> right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); }
+
+ /// MultiplyAddRotateComplexBySelectedScalar : Complex multiply-add with rotate
+
+ /// <summary>
+ /// svfloat32_t svcmla_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<float> MultiplyAddRotateComplexBySelectedScalar(Vector<float> addend, Vector<float> left, Vector<float> right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); }
+
+ /// MultiplyBySelectedScalar : Multiply
+
+ /// <summary>
+ /// svfloat32_t svmul_lane[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<float> MultiplyBySelectedScalar(Vector<float> left, Vector<float> right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svmul_lane[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<double> MultiplyBySelectedScalar(Vector<double> left, Vector<double> right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); }
+
+ /// MultiplyExtended : Multiply extended (∞×0=2)
+
+ /// <summary>
+ /// svfloat32_t svmulx[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// svfloat32_t svmulx[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// svfloat32_t svmulx[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> MultiplyExtended(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svmulx[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// svfloat64_t svmulx[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// svfloat64_t svmulx[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> MultiplyExtended(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+ /// MultiplySubtract : Multiply-subtract, minuend first
+
+ /// <summary>
+ /// svint8_t svmls[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ /// svint8_t svmls[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ /// svint8_t svmls[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ /// </summary>
+ public static unsafe Vector<sbyte> MultiplySubtract(Vector<sbyte> minuend, Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svmls[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ /// svint16_t svmls[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ /// svint16_t svmls[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ /// </summary>
+ public static unsafe Vector<short> MultiplySubtract(Vector<short> minuend, Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svmls[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
+ /// svint32_t svmls[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
+ /// svint32_t svmls[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3)
+ /// </summary>
+ public static unsafe Vector<int> MultiplySubtract(Vector<int> minuend, Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svmls[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
+ /// svint64_t svmls[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
+ /// svint64_t svmls[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3)
+ /// </summary>
+ public static unsafe Vector<long> MultiplySubtract(Vector<long> minuend, Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svmls[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
+ /// svuint8_t svmls[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
+ /// svuint8_t svmls[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3)
+ /// </summary>
+ public static unsafe Vector<byte> MultiplySubtract(Vector<byte> minuend, Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svmls[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
+ /// svuint16_t svmls[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
+ /// svuint16_t svmls[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3)
+ /// </summary>
+ public static unsafe Vector<ushort> MultiplySubtract(Vector<ushort> minuend, Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svmls[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
+ /// svuint32_t svmls[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
+ /// svuint32_t svmls[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplySubtract(Vector<uint> minuend, Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svmls[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
+ /// svuint64_t svmls[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
+ /// svuint64_t svmls[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplySubtract(Vector<ulong> minuend, Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ /// Negate : Negate
+
+ /// <summary>
+ /// svint8_t svneg[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op)
+ /// svint8_t svneg[_s8]_x(svbool_t pg, svint8_t op)
+ /// svint8_t svneg[_s8]_z(svbool_t pg, svint8_t op)
+ /// </summary>
+ public static unsafe Vector<sbyte> Negate(Vector<sbyte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svneg[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ /// svint16_t svneg[_s16]_x(svbool_t pg, svint16_t op)
+ /// svint16_t svneg[_s16]_z(svbool_t pg, svint16_t op)
+ /// </summary>
+ public static unsafe Vector<short> Negate(Vector<short> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svneg[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ /// svint32_t svneg[_s32]_x(svbool_t pg, svint32_t op)
+ /// svint32_t svneg[_s32]_z(svbool_t pg, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<int> Negate(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svneg[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ /// svint64_t svneg[_s64]_x(svbool_t pg, svint64_t op)
+ /// svint64_t svneg[_s64]_z(svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> Negate(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat32_t svneg[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svneg[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svneg[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<float> Negate(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svneg[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svneg[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svneg[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<double> Negate(Vector<double> value) { throw new PlatformNotSupportedException(); }
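+ // --- Editorial sketch: MultiplySubtract computes minuend - (left * right), the MLS
+ // counterpart of MultiplyAdd; the helper below is illustrative only.
+ //     Vector<int> MulSub(Vector<int> m, Vector<int> x, Vector<int> y) =>
+ //         Sve.IsSupported ? Sve.MultiplySubtract(m, x, y) : m - (x * y);
+ //     // For MultiplyAddRotateComplex, `rotation` is the constant imm_rotation
+ //     // selecting one of the four 90-degree rotations of the ACLE svcmla form.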
+ /// Not : Bitwise invert
+
+ /// <summary>
+ /// svint8_t svnot[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op)
+ /// svint8_t svnot[_s8]_x(svbool_t pg, svint8_t op)
+ /// svint8_t svnot[_s8]_z(svbool_t pg, svint8_t op)
+ /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe Vector<sbyte> Not(Vector<sbyte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svnot[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ /// svint16_t svnot[_s16]_x(svbool_t pg, svint16_t op)
+ /// svint16_t svnot[_s16]_z(svbool_t pg, svint16_t op)
+ /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe Vector<short> Not(Vector<short> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svnot[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ /// svint32_t svnot[_s32]_x(svbool_t pg, svint32_t op)
+ /// svint32_t svnot[_s32]_z(svbool_t pg, svint32_t op)
+ /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe Vector<int> Not(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svnot[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ /// svint64_t svnot[_s64]_x(svbool_t pg, svint64_t op)
+ /// svint64_t svnot[_s64]_z(svbool_t pg, svint64_t op)
+ /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe Vector<long> Not(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svnot[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op)
+ /// svuint8_t svnot[_u8]_x(svbool_t pg, svuint8_t op)
+ /// svuint8_t svnot[_u8]_z(svbool_t pg, svuint8_t op)
+ /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe Vector<byte> Not(Vector<byte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svnot[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+ /// svuint16_t svnot[_u16]_x(svbool_t pg, svuint16_t op)
+ /// svuint16_t svnot[_u16]_z(svbool_t pg, svuint16_t op)
+ /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> Not(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svnot[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ /// svuint32_t svnot[_u32]_x(svbool_t pg, svuint32_t op)
+ /// svuint32_t svnot[_u32]_z(svbool_t pg, svuint32_t op)
+ /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe Vector<uint> Not(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svnot[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ /// svuint64_t svnot[_u64]_x(svbool_t pg, svuint64_t op)
+ /// svuint64_t svnot[_u64]_z(svbool_t pg, svuint64_t op)
+ /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> Not(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+ /// Or : Bitwise inclusive OR
+
+ /// <summary>
+ /// svint8_t svorr[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svorr[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svorr[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> Or(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svorr[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svorr[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svorr[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<short> Or(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svorr[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svorr[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svorr[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<int> Or(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svorr[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svorr[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svorr[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<long> Or(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svorr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svorr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svorr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> Or(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svorr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svorr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svorr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> Or(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svorr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svorr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svorr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> Or(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svorr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svorr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svorr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> Or(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ /// OrAcross : Bitwise inclusive OR reduction to scalar
+
+ /// <summary>
+ /// int8_t svorv[_s8](svbool_t pg, svint8_t op)
+ /// </summary>
+ public static unsafe Vector<sbyte> OrAcross(Vector<sbyte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int16_t svorv[_s16](svbool_t pg, svint16_t op)
+ /// </summary>
+ public static unsafe Vector<short> OrAcross(Vector<short> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int32_t svorv[_s32](svbool_t pg, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<int> OrAcross(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int64_t svorv[_s64](svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> OrAcross(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint8_t svorv[_u8](svbool_t pg, svuint8_t op)
+ /// </summary>
+ public static unsafe Vector<byte> OrAcross(Vector<byte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint16_t svorv[_u16](svbool_t pg, svuint16_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> OrAcross(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint32_t svorv[_u32](svbool_t pg, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> OrAcross(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint64_t svorv[_u64](svbool_t pg, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> OrAcross(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
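+ // --- Editorial sketch: the _m/_x/_z lines above are the merging, "don't care",
+ // and zeroing predicate forms of one underlying instruction; the managed surface
+ // exposes a single signature and leaves predication to the code generator.
+ //     Vector<uint> masked = Sve.Or(flags, Sve.Not(mask));   // flags | ~mask
+ //     // OrAcross ORs every element into element 0:
+ //     //     uint bits = Sve.OrAcross(flags)[0];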
+ /// OrNot : Bitwise NOR
+
+ /// <summary>
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> OrNot(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<short> OrNot(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<int> OrNot(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<long> OrNot(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> OrNot(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> OrNot(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> OrNot(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> OrNot(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ /// PopCount : Count nonzero bits
+
+ /// <summary>
+ /// svuint8_t svcnt[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op)
+ /// svuint8_t svcnt[_s8]_x(svbool_t pg, svint8_t op)
+ /// svuint8_t svcnt[_s8]_z(svbool_t pg, svint8_t op)
+ /// </summary>
+ public static unsafe Vector<byte> PopCount(Vector<sbyte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svcnt[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op)
+ /// svuint8_t svcnt[_u8]_x(svbool_t pg, svuint8_t op)
+ /// svuint8_t svcnt[_u8]_z(svbool_t pg, svuint8_t op)
+ /// </summary>
+ public static unsafe Vector<byte> PopCount(Vector<byte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svcnt[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op)
+ /// svuint16_t svcnt[_s16]_x(svbool_t pg, svint16_t op)
+ /// svuint16_t svcnt[_s16]_z(svbool_t pg, svint16_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> PopCount(Vector<short> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svcnt[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+ /// svuint16_t svcnt[_u16]_x(svbool_t pg, svuint16_t op)
+ /// svuint16_t svcnt[_u16]_z(svbool_t pg, svuint16_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> PopCount(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svcnt[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op)
+ /// svuint32_t svcnt[_s32]_x(svbool_t pg, svint32_t op)
+ /// svuint32_t svcnt[_s32]_z(svbool_t pg, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> PopCount(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svcnt[_f32]_m(svuint32_t inactive, svbool_t pg, svfloat32_t op)
+ /// svuint32_t svcnt[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// svuint32_t svcnt[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> PopCount(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svcnt[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ /// svuint32_t svcnt[_u32]_x(svbool_t pg, svuint32_t op)
+ /// svuint32_t svcnt[_u32]_z(svbool_t pg, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> PopCount(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svcnt[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op)
+ /// svuint64_t svcnt[_s64]_x(svbool_t pg, svint64_t op)
+ /// svuint64_t svcnt[_s64]_z(svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> PopCount(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svcnt[_f64]_m(svuint64_t inactive, svbool_t pg, svfloat64_t op)
+ /// svuint64_t svcnt[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// svuint64_t svcnt[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> PopCount(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svcnt[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ /// svuint64_t svcnt[_u64]_x(svbool_t pg, svuint64_t op)
+ /// svuint64_t svcnt[_u64]_z(svbool_t pg, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> PopCount(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
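+ // --- Editorial sketch: PopCount returns per-element set-bit counts; note the
+ // unsigned result element even for signed and float inputs (the raw bit pattern
+ // is what gets counted).
+ //     Vector<uint> bits = Sve.PopCount(values);   // values: Vector<int> or Vector<float>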
+ /// PrefetchBytes : Prefetch bytes
+
+ /// <summary>
+ /// void svprfb(svbool_t pg, const void *base, enum svprfop op)
+ /// </summary>
+ public static unsafe void PrefetchBytes(Vector<byte> mask, void* address, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); }
+
+ /// PrefetchInt16 : Prefetch halfwords
+
+ /// <summary>
+ /// void svprfh(svbool_t pg, const void *base, enum svprfop op)
+ /// </summary>
+ public static unsafe void PrefetchInt16(Vector<ushort> mask, void* address, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); }
+
+ /// PrefetchInt32 : Prefetch words
+
+ /// <summary>
+ /// void svprfw(svbool_t pg, const void *base, enum svprfop op)
+ /// </summary>
+ public static unsafe void PrefetchInt32(Vector<uint> mask, void* address, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); }
+
+ /// PrefetchInt64 : Prefetch doublewords
+
+ /// <summary>
+ /// void svprfd(svbool_t pg, const void *base, enum svprfop op)
+ /// </summary>
+ public static unsafe void PrefetchInt64(Vector<ulong> mask, void* address, [ConstantExpected] SvePrefetchType prefetchType) { throw new PlatformNotSupportedException(); }
+
+ /// ReciprocalEstimate : Reciprocal estimate
+
+ /// <summary>
+ /// svfloat32_t svrecpe[_f32](svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<float> ReciprocalEstimate(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svrecpe[_f64](svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<double> ReciprocalEstimate(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+ /// ReciprocalExponent : Reciprocal exponent
+
+ /// <summary>
+ /// svfloat32_t svrecpx[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svrecpx[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svrecpx[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<float> ReciprocalExponent(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svrecpx[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svrecpx[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svrecpx[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<double> ReciprocalExponent(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+ /// ReciprocalSqrtEstimate : Reciprocal square root estimate
+
+ /// <summary>
+ /// svfloat32_t svrsqrte[_f32](svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<float> ReciprocalSqrtEstimate(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svrsqrte[_f64](svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<double> ReciprocalSqrtEstimate(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+ /// ReciprocalSqrtStep : Reciprocal square root step
+
+ /// <summary>
+ /// svfloat32_t svrsqrts[_f32](svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> ReciprocalSqrtStep(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svrsqrts[_f64](svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> ReciprocalSqrtStep(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+ /// ReciprocalStep : Reciprocal step
+
+ /// <summary>
+ /// svfloat32_t svrecps[_f32](svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> ReciprocalStep(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svrecps[_f64](svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> ReciprocalStep(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
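+ // --- Editorial sketch: the estimate/step pairs support Newton-Raphson refinement
+ // of a division or square root; each step roughly doubles the precision of the
+ // initial low-precision estimate. Assuming Sve.IsSupported:
+ //     Vector<float> x = Sve.ReciprocalEstimate(d);          // rough 1/d
+ //     x = Sve.Multiply(x, Sve.ReciprocalStep(d, x));        // x *= (2 - d*x)
+ //     x = Sve.Multiply(x, Sve.ReciprocalStep(d, x));        // refine once more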
+ /// ReverseBits : Reverse bits
+
+ /// <summary>
+ /// svint8_t svrbit[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op)
+ /// svint8_t svrbit[_s8]_x(svbool_t pg, svint8_t op)
+ /// svint8_t svrbit[_s8]_z(svbool_t pg, svint8_t op)
+ /// </summary>
+ public static unsafe Vector<sbyte> ReverseBits(Vector<sbyte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svrbit[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ /// svint16_t svrbit[_s16]_x(svbool_t pg, svint16_t op)
+ /// svint16_t svrbit[_s16]_z(svbool_t pg, svint16_t op)
+ /// </summary>
+ public static unsafe Vector<short> ReverseBits(Vector<short> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svrbit[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ /// svint32_t svrbit[_s32]_x(svbool_t pg, svint32_t op)
+ /// svint32_t svrbit[_s32]_z(svbool_t pg, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<int> ReverseBits(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svrbit[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ /// svint64_t svrbit[_s64]_x(svbool_t pg, svint64_t op)
+ /// svint64_t svrbit[_s64]_z(svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> ReverseBits(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svrbit[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op)
+ /// svuint8_t svrbit[_u8]_x(svbool_t pg, svuint8_t op)
+ /// svuint8_t svrbit[_u8]_z(svbool_t pg, svuint8_t op)
+ /// </summary>
+ public static unsafe Vector<byte> ReverseBits(Vector<byte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svrbit[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+ /// svuint16_t svrbit[_u16]_x(svbool_t pg, svuint16_t op)
+ /// svuint16_t svrbit[_u16]_z(svbool_t pg, svuint16_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> ReverseBits(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svrbit[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ /// svuint32_t svrbit[_u32]_x(svbool_t pg, svuint32_t op)
+ /// svuint32_t svrbit[_u32]_z(svbool_t pg, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> ReverseBits(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svrbit[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ /// svuint64_t svrbit[_u64]_x(svbool_t pg, svuint64_t op)
+ /// svuint64_t svrbit[_u64]_z(svbool_t pg, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> ReverseBits(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+ /// ReverseElement : Reverse all elements
+
+ /// <summary>
+ /// svint8_t svrev[_s8](svint8_t op)
+ /// </summary>
+ public static unsafe Vector<sbyte> ReverseElement(Vector<sbyte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svrev[_s16](svint16_t op)
+ /// </summary>
+ public static unsafe Vector<short> ReverseElement(Vector<short> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svrev[_s32](svint32_t op)
+ /// </summary>
+ public static unsafe Vector<int> ReverseElement(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svrev[_s64](svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> ReverseElement(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svrev[_u8](svuint8_t op)
+ /// svbool_t svrev_b8(svbool_t op)
+ /// </summary>
+ public static unsafe Vector<byte> ReverseElement(Vector<byte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svrev[_u16](svuint16_t op)
+ /// svbool_t svrev_b16(svbool_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> ReverseElement(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svrev[_u32](svuint32_t op)
+ /// svbool_t svrev_b32(svbool_t op)
+ /// </summary>
+ public static unsafe Vector<uint> ReverseElement(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svrev[_u64](svuint64_t op)
+ /// svbool_t svrev_b64(svbool_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> ReverseElement(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat32_t svrev[_f32](svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<float> ReverseElement(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svrev[_f64](svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<double> ReverseElement(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+ /// ReverseElement16 : Reverse halfwords within elements
+
+ /// <summary>
+ /// svint32_t svrevh[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ /// svint32_t svrevh[_s32]_x(svbool_t pg, svint32_t op)
+ /// svint32_t svrevh[_s32]_z(svbool_t pg, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<int> ReverseElement16(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svrevh[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ /// svint64_t svrevh[_s64]_x(svbool_t pg, svint64_t op)
+ /// svint64_t svrevh[_s64]_z(svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> ReverseElement16(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svrevh[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ /// svuint32_t svrevh[_u32]_x(svbool_t pg, svuint32_t op)
+ /// svuint32_t svrevh[_u32]_z(svbool_t pg, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> ReverseElement16(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svrevh[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ /// svuint64_t svrevh[_u64]_x(svbool_t pg, svuint64_t op)
+ /// svuint64_t svrevh[_u64]_z(svbool_t pg, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> ReverseElement16(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+ /// ReverseElement32 : Reverse words within elements
+
+ /// <summary>
+ /// svint64_t svrevw[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ /// svint64_t svrevw[_s64]_x(svbool_t pg, svint64_t op)
+ /// svint64_t svrevw[_s64]_z(svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> ReverseElement32(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svrevw[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ /// svuint64_t svrevw[_u64]_x(svbool_t pg, svuint64_t op)
+ /// svuint64_t svrevw[_u64]_z(svbool_t pg, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> ReverseElement32(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+ /// ReverseElement8 : Reverse bytes within elements
+
+ /// <summary>
+ /// svint16_t svrevb[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ /// svint16_t svrevb[_s16]_x(svbool_t pg, svint16_t op)
+ /// svint16_t svrevb[_s16]_z(svbool_t pg, svint16_t op)
+ /// </summary>
+ public static unsafe Vector<short> ReverseElement8(Vector<short> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svrevb[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ /// svint32_t svrevb[_s32]_x(svbool_t pg, svint32_t op)
+ /// svint32_t svrevb[_s32]_z(svbool_t pg, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<int> ReverseElement8(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svrevb[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ /// svint64_t svrevb[_s64]_x(svbool_t pg, svint64_t op)
+ /// svint64_t svrevb[_s64]_z(svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> ReverseElement8(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svrevb[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+ /// svuint16_t svrevb[_u16]_x(svbool_t pg, svuint16_t op)
+ /// svuint16_t svrevb[_u16]_z(svbool_t pg, svuint16_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> ReverseElement8(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svrevb[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ /// svuint32_t svrevb[_u32]_x(svbool_t pg, svuint32_t op)
+ /// svuint32_t svrevb[_u32]_z(svbool_t pg, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> ReverseElement8(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svrevb[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ /// svuint64_t svrevb[_u64]_x(svbool_t pg, svuint64_t op)
+ /// svuint64_t svrevb[_u64]_z(svbool_t pg, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> ReverseElement8(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+ /// RoundAwayFromZero : Round to nearest, ties away from zero
+
+ /// <summary>
+ /// svfloat32_t svrinta[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svrinta[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svrinta[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<float> RoundAwayFromZero(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svrinta[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svrinta[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svrinta[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<double> RoundAwayFromZero(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+ /// RoundToNearest : Round to nearest, ties to even
+
+ /// <summary>
+ /// svfloat32_t svrintn[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svrintn[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svrintn[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<float> RoundToNearest(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svrintn[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svrintn[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svrintn[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<double> RoundToNearest(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+ /// RoundToNegativeInfinity : Round towards -∞
+
+ /// <summary>
+ /// svfloat32_t svrintm[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svrintm[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svrintm[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<float> RoundToNegativeInfinity(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svrintm[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svrintm[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svrintm[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<double> RoundToNegativeInfinity(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+ /// RoundToPositiveInfinity : Round towards +∞
+
+ /// <summary>
+ /// svfloat32_t svrintp[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svrintp[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svrintp[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<float> RoundToPositiveInfinity(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svrintp[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svrintp[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svrintp[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<double> RoundToPositiveInfinity(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+ /// RoundToZero : Round towards zero
+
+ /// <summary>
+ /// svfloat32_t svrintz[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svrintz[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svrintz[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<float> RoundToZero(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svrintz[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svrintz[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svrintz[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<double> RoundToZero(Vector<double> value) { throw new PlatformNotSupportedException(); }
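+ // --- Editorial sketch: the five Round* groups correspond to the FRINTA/N/M/P/Z
+ // family, i.e. the standard directed-rounding and ties modes:
+ //     Sve.RoundToNearest(v)           // ties to even   (svrintn)
+ //     Sve.RoundAwayFromZero(v)        // ties away      (svrinta)
+ //     Sve.RoundToNegativeInfinity(v)  // floor          (svrintm)
+ //     Sve.RoundToPositiveInfinity(v)  // ceiling        (svrintp)
+ //     Sve.RoundToZero(v)              // truncate       (svrintz)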
+ /// SaturatingDecrementBy16BitElementCount : Saturating decrement by number of halfword elements
+
+ /// <summary>
+ /// int32_t svqdech_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe int SaturatingDecrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int64_t svqdech_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe long SaturatingDecrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint32_t svqdech_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe uint SaturatingDecrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint64_t svqdech_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe ulong SaturatingDecrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svqdech_pat[_s16](svint16_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingDecrementBy16BitElementCount(Vector<short> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svqdech_pat[_u16](svuint16_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe Vector<ushort> SaturatingDecrementBy16BitElementCount(Vector<ushort> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// SaturatingDecrementBy32BitElementCount : Saturating decrement by number of word elements
+
+ /// <summary>
+ /// int32_t svqdecw_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe int SaturatingDecrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int64_t svqdecw_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe long SaturatingDecrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint32_t svqdecw_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe uint SaturatingDecrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint64_t svqdecw_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe ulong SaturatingDecrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svqdecw_pat[_s32](svint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDecrementBy32BitElementCount(Vector<int> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svqdecw_pat[_u32](svuint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe Vector<uint> SaturatingDecrementBy32BitElementCount(Vector<uint> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// SaturatingDecrementBy64BitElementCount : Saturating decrement by number of doubleword elements
+
+ /// <summary>
+ /// int32_t svqdecd_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe int SaturatingDecrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int64_t svqdecd_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe long SaturatingDecrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint32_t svqdecd_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe uint SaturatingDecrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint64_t svqdecd_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe ulong SaturatingDecrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svqdecd_pat[_s64](svint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDecrementBy64BitElementCount(Vector<long> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svqdecd_pat[_u64](svuint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe Vector<ulong> SaturatingDecrementBy64BitElementCount(Vector<ulong> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// SaturatingDecrementBy8BitElementCount : Saturating decrement by number of byte elements
+
+ /// <summary>
+ /// int32_t svqdecb_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe int SaturatingDecrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int64_t svqdecb_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe long SaturatingDecrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint32_t svqdecb_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe uint SaturatingDecrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint64_t svqdecb_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe ulong SaturatingDecrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
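+ // --- Editorial sketch: these map to the SQDECB/H/W/D-style counter updates.
+ // Typical use is stepping a loop counter down by scale * (elements per vector)
+ // without wrapping past the type's limit; `scale` is the ACLE imm_factor (1-16).
+ //     long remaining = total;
+ //     remaining = Sve.SaturatingDecrementBy8BitElementCount(remaining, 1);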
+ /// SaturatingDecrementByActiveElementCount : Saturating decrement by active element count
+
+ /// <summary>
+ /// svint16_t svqdecp[_s16](svint16_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingDecrementByActiveElementCount(Vector<short> value, Vector<short> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svqdecp[_s32](svint32_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDecrementByActiveElementCount(Vector<int> value, Vector<int> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svqdecp[_s64](svint64_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDecrementByActiveElementCount(Vector<long> value, Vector<long> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int32_t svqdecp[_n_s32]_b8(int32_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector<byte> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int64_t svqdecp[_n_s64]_b8(int64_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector<byte> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint32_t svqdecp[_n_u32]_b8(uint32_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector<byte> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint64_t svqdecp[_n_u64]_b8(uint64_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector<byte> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int32_t svqdecp[_n_s32]_b16(int32_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector<ushort> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int64_t svqdecp[_n_s64]_b16(int64_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector<ushort> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint32_t svqdecp[_n_u32]_b16(uint32_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector<ushort> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint64_t svqdecp[_n_u64]_b16(uint64_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector<ushort> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svqdecp[_u16](svuint16_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe Vector<ushort> SaturatingDecrementByActiveElementCount(Vector<ushort> value, Vector<ushort> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int32_t svqdecp[_n_s32]_b32(int32_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector<uint> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int64_t svqdecp[_n_s64]_b32(int64_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector<uint> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint32_t svqdecp[_n_u32]_b32(uint32_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector<uint> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint64_t svqdecp[_n_u64]_b32(uint64_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector<uint> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svqdecp[_u32](svuint32_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe Vector<uint> SaturatingDecrementByActiveElementCount(Vector<uint> value, Vector<uint> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int32_t svqdecp[_n_s32]_b64(int32_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector<ulong> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int64_t svqdecp[_n_s64]_b64(int64_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector<ulong> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint32_t svqdecp[_n_u32]_b64(uint32_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector<ulong> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint64_t svqdecp[_n_u64]_b64(uint64_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector<ulong> from) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svqdecp[_u64](svuint64_t op, svbool_t pg)
+ /// </summary>
+ public static unsafe Vector<ulong> SaturatingDecrementByActiveElementCount(Vector<ulong> value, Vector<ulong> from) { throw new PlatformNotSupportedException(); }
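+ // --- Editorial sketch: the ByActiveElementCount forms (svqdecp) subtract the
+ // number of active lanes in `from`, which stands in for the svbool_t governing
+ // predicate, saturating instead of wrapping. Names here are illustrative.
+ //     int count = Sve.SaturatingDecrementByActiveElementCount(count0, mask8);
+ //     // mask8 : Vector<byte> whose nonzero lanes are counted as "active"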
+ /// SaturatingIncrementBy16BitElementCount : Saturating increment by number of halfword elements
+
+ /// <summary>
+ /// int32_t svqinch_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe int SaturatingIncrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int64_t svqinch_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe long SaturatingIncrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint32_t svqinch_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe uint SaturatingIncrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint64_t svqinch_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe ulong SaturatingIncrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svqinch_pat[_s16](svint16_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingIncrementBy16BitElementCount(Vector<short> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svqinch_pat[_u16](svuint16_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe Vector<ushort> SaturatingIncrementBy16BitElementCount(Vector<ushort> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// SaturatingIncrementBy32BitElementCount : Saturating increment by number of word elements
+
+ /// <summary>
+ /// int32_t svqincw_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe int SaturatingIncrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int64_t svqincw_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe long SaturatingIncrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint32_t svqincw_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe uint SaturatingIncrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint64_t svqincw_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe ulong SaturatingIncrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svqincw_pat[_s32](svint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingIncrementBy32BitElementCount(Vector<int> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svqincw_pat[_u32](svuint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe Vector<uint> SaturatingIncrementBy32BitElementCount(Vector<uint> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// SaturatingIncrementBy64BitElementCount : Saturating increment by number of doubleword elements
+
+ /// <summary>
+ /// int32_t svqincd_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe int SaturatingIncrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int64_t svqincd_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe long SaturatingIncrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint32_t svqincd_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe uint SaturatingIncrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint64_t svqincd_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe ulong SaturatingIncrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svqincd_pat[_s64](svint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingIncrementBy64BitElementCount(Vector<long> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svqincd_pat[_u64](svuint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe Vector<ulong> SaturatingIncrementBy64BitElementCount(Vector<ulong> value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// SaturatingIncrementBy8BitElementCount : Saturating increment by number of byte elements
+
+ /// <summary>
+ /// int32_t svqincb_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe int SaturatingIncrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int64_t svqincb_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe long SaturatingIncrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint32_t svqincb_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe uint SaturatingIncrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint64_t svqincb_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor)
+ /// </summary>
+ public static unsafe ulong SaturatingIncrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector from) { throw new PlatformNotSupportedException(); } + + /// + /// uint64_t svqincp[_n_u64]_b16(uint64_t op, svbool_t pg) + /// + public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector from) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqincp[_u16](svuint16_t op, svbool_t pg) + /// + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from) { throw new PlatformNotSupportedException(); } + + /// + /// int32_t svqincp[_n_s32]_b32(int32_t op, svbool_t pg) + /// + public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector from) { throw new PlatformNotSupportedException(); } + + /// + /// int64_t svqincp[_n_s64]_b32(int64_t op, svbool_t pg) + /// + public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector from) { throw new PlatformNotSupportedException(); } + + /// + /// uint32_t svqincp[_n_u32]_b32(uint32_t op, svbool_t pg) + /// + public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector from) { throw new PlatformNotSupportedException(); } + + /// + /// uint64_t svqincp[_n_u64]_b32(uint64_t op, svbool_t pg) + /// + public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector from) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqincp[_u32](svuint32_t op, svbool_t pg) + /// + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from) { throw new PlatformNotSupportedException(); } + + /// + /// int32_t svqincp[_n_s32]_b64(int32_t op, svbool_t pg) + /// + public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector from) { throw new PlatformNotSupportedException(); } + + /// + /// int64_t svqincp[_n_s64]_b64(int64_t op, svbool_t pg) + /// + public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector from) { throw new PlatformNotSupportedException(); } + + /// + /// uint32_t svqincp[_n_u32]_b64(uint32_t op, svbool_t pg) + /// + public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector from) { throw new PlatformNotSupportedException(); } + + /// + /// uint64_t svqincp[_n_u64]_b64(uint64_t op, svbool_t pg) + /// + public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector from) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svqincp[_u64](svuint64_t op, svbool_t pg) + /// + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from) { throw new PlatformNotSupportedException(); } + + + /// Scale : Adjust exponent + + /// + /// svfloat32_t svscale[_f32]_m(svbool_t pg, svfloat32_t op1, svint32_t op2) + /// svfloat32_t svscale[_f32]_x(svbool_t pg, svfloat32_t op1, svint32_t op2) + /// svfloat32_t svscale[_f32]_z(svbool_t pg, svfloat32_t op1, svint32_t op2) + /// + public static unsafe Vector Scale(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svscale[_f64]_m(svbool_t pg, svfloat64_t op1, svint64_t op2) + /// svfloat64_t svscale[_f64]_x(svbool_t pg, svfloat64_t op1, svint64_t op2) + /// svfloat64_t svscale[_f64]_z(svbool_t pg, svfloat64_t op1, svint64_t op2) + /// + public static unsafe Vector Scale(Vector left, Vector right) { throw new PlatformNotSupportedException(); }
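+
+ // Example (illustrative): Scale adjusts each active lane's exponent, computing
+ // roughly left * 2^right per lane, so a lane pair (1.5f, 3) yields 12.0f
+ // (variables hypothetical):
+ //   Vector<float> scaled = Sve.Scale(values, exponents);
+
+ + + /// Scatter : Non-truncating store + + /// + /// void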
svst1_scatter_[s32]offset[_s32](svbool_t pg, int32_t *base, svint32_t offsets, svint32_t data) + /// void svst1_scatter_[s32]index[_s32](svbool_t pg, int32_t *base, svint32_t indices, svint32_t data) + /// + public static unsafe void Scatter(Vector mask, int* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) + /// + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter_[u32]offset[_s32](svbool_t pg, int32_t *base, svuint32_t offsets, svint32_t data) + /// void svst1_scatter_[u32]index[_s32](svbool_t pg, int32_t *base, svuint32_t indices, svint32_t data) + /// + public static unsafe void Scatter(Vector mask, int* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter_[s64]offset[_s64](svbool_t pg, int64_t *base, svint64_t offsets, svint64_t data) + /// void svst1_scatter_[s64]index[_s64](svbool_t pg, int64_t *base, svint64_t indices, svint64_t data) + /// + public static unsafe void Scatter(Vector mask, long* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter_[u64]offset[_s64](svbool_t pg, int64_t *base, svuint64_t offsets, svint64_t data) + /// void svst1_scatter_[u64]index[_s64](svbool_t pg, int64_t *base, svuint64_t indices, svint64_t data) + /// + public static unsafe void Scatter(Vector mask, long* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter_[s32]offset[_u32](svbool_t pg, uint32_t *base, svint32_t offsets, svuint32_t data) + /// void svst1_scatter_[s32]index[_u32](svbool_t pg, uint32_t *base, svint32_t indices, svuint32_t data) + /// + public static unsafe void Scatter(Vector mask, uint* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) + /// + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter_[u32]offset[_u32](svbool_t pg, uint32_t *base, svuint32_t offsets, svuint32_t data) + /// void svst1_scatter_[u32]index[_u32](svbool_t pg, uint32_t *base, svuint32_t indices, svuint32_t data) + /// + public static unsafe void Scatter(Vector mask, uint* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter_[s64]offset[_u64](svbool_t pg, uint64_t *base, svint64_t offsets, svuint64_t data) + /// void svst1_scatter_[s64]index[_u64](svbool_t pg, uint64_t *base, svint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter(Vector mask, ulong* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter_[u64]offset[_u64](svbool_t pg, uint64_t *base,
svuint64_t offsets, svuint64_t data) + /// void svst1_scatter_[u64]index[_u64](svbool_t pg, uint64_t *base, svuint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter(Vector mask, ulong* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter_[s32]offset[_f32](svbool_t pg, float32_t *base, svint32_t offsets, svfloat32_t data) + /// void svst1_scatter_[s32]index[_f32](svbool_t pg, float32_t *base, svint32_t indices, svfloat32_t data) + /// + public static unsafe void Scatter(Vector mask, float* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter[_u32base_f32](svbool_t pg, svuint32_t bases, svfloat32_t data) + /// + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter_[u32]offset[_f32](svbool_t pg, float32_t *base, svuint32_t offsets, svfloat32_t data) + /// void svst1_scatter_[u32]index[_f32](svbool_t pg, float32_t *base, svuint32_t indices, svfloat32_t data) + /// + public static unsafe void Scatter(Vector mask, float* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter_[s64]offset[_f64](svbool_t pg, float64_t *base, svint64_t offsets, svfloat64_t data) + /// void svst1_scatter_[s64]index[_f64](svbool_t pg, float64_t *base, svint64_t indices, svfloat64_t data) + /// + public static unsafe void Scatter(Vector mask, double* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter[_u64base_f64](svbool_t pg, svuint64_t bases, svfloat64_t data) + /// + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1_scatter_[u64]offset[_f64](svbool_t pg, float64_t *base, svuint64_t offsets, svfloat64_t data) + /// void svst1_scatter_[u64]index[_f64](svbool_t pg, float64_t *base, svuint64_t indices, svfloat64_t data) + /// + public static unsafe void Scatter(Vector mask, double* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); }
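+
+ // Example (illustrative): Scatter is the store-side dual of Gather; for each
+ // active lane i it writes the i-th data lane relative to the base address (the
+ // index forms scale by the element size, the offset forms are in bytes), e.g.
+ // with hypothetical variables:
+ //   Sve.Scatter(mask, basePtr, indexVector, dataVector);
+
+ + + /// Scatter16BitNarrowing : Truncate to 16 bits and store + + /// + /// void svst1h_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + + /// Scatter16BitWithByteOffsetsNarrowing : Truncate to 16 bits and store + + /// + /// void svst1h_scatter_[s32]offset[_s32](svbool_t pg, int16_t *base, svint32_t offsets, svint32_t data) + /// + public static unsafe void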
Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[u32]offset[_s32](svbool_t pg, int16_t *base, svuint32_t offsets, svint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[s32]index[_s32](svbool_t pg, int16_t *base, svint32_t indices, svint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[u32]index[_s32](svbool_t pg, int16_t *base, svuint32_t indices, svint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[s64]offset[_s64](svbool_t pg, int16_t *base, svint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[u64]offset[_s64](svbool_t pg, int16_t *base, svuint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[s64]index[_s64](svbool_t pg, int16_t *base, svint64_t indices, svint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[u64]index[_s64](svbool_t pg, int16_t *base, svuint64_t indices, svint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[s32]offset[_u32](svbool_t pg, uint16_t *base, svint32_t offsets, svuint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[u32]offset[_u32](svbool_t pg, uint16_t *base, svuint32_t offsets, svuint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[s32]index[_u32](svbool_t pg, uint16_t *base, svint32_t indices, svuint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[u32]index[_u32](svbool_t pg, uint16_t *base, svuint32_t indices, svuint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[s64]offset[_u64](svbool_t pg, uint16_t *base, svint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector 
mask, ushort* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[u64]offset[_u64](svbool_t pg, uint16_t *base, svuint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[s64]index[_u64](svbool_t pg, uint16_t *base, svint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h_scatter_[u64]index[_u64](svbool_t pg, uint16_t *base, svuint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + + /// Scatter32BitNarrowing : Truncate to 32 bits and store + + /// + /// void svst1w_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1w_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + + /// Scatter32BitWithByteOffsetsNarrowing : Truncate to 32 bits and store + + /// + /// void svst1w_scatter_[s64]offset[_s64](svbool_t pg, int32_t *base, svint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1w_scatter_[u64]offset[_s64](svbool_t pg, int32_t *base, svuint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1w_scatter_[s64]index[_s64](svbool_t pg, int32_t *base, svint64_t indices, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1w_scatter_[u64]index[_s64](svbool_t pg, int32_t *base, svuint64_t indices, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1w_scatter_[s64]offset[_u64](svbool_t pg, uint32_t *base, svint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1w_scatter_[u64]offset[_u64](svbool_t pg, uint32_t *base, svuint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1w_scatter_[s64]index[_u64](svbool_t pg, uint32_t *base, svint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, 
uint* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1w_scatter_[u64]index[_u64](svbool_t pg, uint32_t *base, svuint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + + /// Scatter8BitNarrowing : Truncate to 8 bits and store + + /// + /// void svst1b_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1b_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1b_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1b_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + + /// Scatter8BitWithByteOffsetsNarrowing : Truncate to 8 bits and store + + /// + /// void svst1b_scatter_[s32]offset[_s32](svbool_t pg, int8_t *base, svint32_t offsets, svint32_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1b_scatter_[u32]offset[_s32](svbool_t pg, int8_t *base, svuint32_t offsets, svint32_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1b_scatter_[s64]offset[_s64](svbool_t pg, int8_t *base, svint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1b_scatter_[u64]offset[_s64](svbool_t pg, int8_t *base, svuint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1b_scatter_[s32]offset[_u32](svbool_t pg, uint8_t *base, svint32_t offsets, svuint32_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1b_scatter_[u32]offset[_u32](svbool_t pg, uint8_t *base, svuint32_t offsets, svuint32_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1b_scatter_[s64]offset[_u64](svbool_t pg, uint8_t *base, svint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void 
svst1b_scatter_[u64]offset[_u64](svbool_t pg, uint8_t *base, svuint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + + /// SetFfr : Write to the first-fault register + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) { throw new PlatformNotSupportedException(); }
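+
+ // Example (illustrative): speculative loops normally reset the first-fault
+ // register to all-true before issuing first-faulting loads, then query it to
+ // learn which lanes actually loaded; CreateTrueMaskByte is assumed from
+ // elsewhere in this API surface:
+ //   Sve.SetFfr(Sve.CreateTrueMaskByte());
+
+ + + /// ShiftLeftLogical : Logical shift left + + /// + /// svint8_t svlsl[_s8]_m(svbool_t pg, svint8_t op1, svuint8_t op2) + /// svint8_t svlsl[_s8]_x(svbool_t pg, svint8_t op1, svuint8_t op2) + /// svint8_t svlsl[_s8]_z(svbool_t pg, svint8_t op1, svuint8_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint8_t svlsl_wide[_s8]_m(svbool_t pg, svint8_t op1, svuint64_t op2) + /// svint8_t svlsl_wide[_s8]_x(svbool_t pg, svint8_t op1, svuint64_t op2) + /// svint8_t svlsl_wide[_s8]_z(svbool_t pg, svint8_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svlsl[_s16]_m(svbool_t pg, svint16_t op1, svuint16_t op2) + /// svint16_t svlsl[_s16]_x(svbool_t pg, svint16_t op1, svuint16_t op2) + /// svint16_t svlsl[_s16]_z(svbool_t pg, svint16_t op1, svuint16_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svlsl_wide[_s16]_m(svbool_t pg, svint16_t op1, svuint64_t op2) + /// svint16_t svlsl_wide[_s16]_x(svbool_t pg, svint16_t op1, svuint64_t op2) + /// svint16_t svlsl_wide[_s16]_z(svbool_t pg, svint16_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svlsl[_s32]_m(svbool_t pg, svint32_t op1, svuint32_t op2) + /// svint32_t svlsl[_s32]_x(svbool_t pg, svint32_t op1, svuint32_t op2) + /// svint32_t svlsl[_s32]_z(svbool_t pg, svint32_t op1, svuint32_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svlsl_wide[_s32]_m(svbool_t pg, svint32_t op1, svuint64_t op2) + /// svint32_t svlsl_wide[_s32]_x(svbool_t pg, svint32_t op1, svuint64_t op2) + /// svint32_t svlsl_wide[_s32]_z(svbool_t pg, svint32_t op1, svuint64_t op2) + ///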
+ public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svlsl[_s64]_m(svbool_t pg, svint64_t op1, svuint64_t op2) + /// svint64_t svlsl[_s64]_x(svbool_t pg, svint64_t op1, svuint64_t op2) + /// svint64_t svlsl[_s64]_z(svbool_t pg, svint64_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svlsl[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svlsl[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svlsl[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svlsl_wide[_u8]_m(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// svuint8_t svlsl_wide[_u8]_x(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// svuint8_t svlsl_wide[_u8]_z(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svlsl[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svlsl[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svlsl[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svlsl_wide[_u16]_m(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// svuint16_t svlsl_wide[_u16]_x(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// svuint16_t svlsl_wide[_u16]_z(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svlsl[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svlsl[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svlsl[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svlsl_wide[_u32]_m(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// svuint32_t svlsl_wide[_u32]_x(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// svuint32_t svlsl_wide[_u32]_z(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svlsl[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svlsl[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svlsl[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightArithmetic : Arithmetic shift right + + /// + /// svint8_t svasr[_s8]_m(svbool_t pg, svint8_t op1, svuint8_t op2) + /// svint8_t svasr[_s8]_x(svbool_t pg, svint8_t op1, svuint8_t op2) + /// svint8_t svasr[_s8]_z(svbool_t pg, svint8_t op1, svuint8_t op2) + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint8_t svasr_wide[_s8]_m(svbool_t pg, svint8_t op1, svuint64_t op2) + /// svint8_t 
svasr_wide[_s8]_x(svbool_t pg, svint8_t op1, svuint64_t op2) + /// svint8_t svasr_wide[_s8]_z(svbool_t pg, svint8_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svasr[_s16]_m(svbool_t pg, svint16_t op1, svuint16_t op2) + /// svint16_t svasr[_s16]_x(svbool_t pg, svint16_t op1, svuint16_t op2) + /// svint16_t svasr[_s16]_z(svbool_t pg, svint16_t op1, svuint16_t op2) + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svasr_wide[_s16]_m(svbool_t pg, svint16_t op1, svuint64_t op2) + /// svint16_t svasr_wide[_s16]_x(svbool_t pg, svint16_t op1, svuint64_t op2) + /// svint16_t svasr_wide[_s16]_z(svbool_t pg, svint16_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svasr[_s32]_m(svbool_t pg, svint32_t op1, svuint32_t op2) + /// svint32_t svasr[_s32]_x(svbool_t pg, svint32_t op1, svuint32_t op2) + /// svint32_t svasr[_s32]_z(svbool_t pg, svint32_t op1, svuint32_t op2) + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svasr_wide[_s32]_m(svbool_t pg, svint32_t op1, svuint64_t op2) + /// svint32_t svasr_wide[_s32]_x(svbool_t pg, svint32_t op1, svuint64_t op2) + /// svint32_t svasr_wide[_s32]_z(svbool_t pg, svint32_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svasr[_s64]_m(svbool_t pg, svint64_t op1, svuint64_t op2) + /// svint64_t svasr[_s64]_x(svbool_t pg, svint64_t op1, svuint64_t op2) + /// svint64_t svasr[_s64]_z(svbool_t pg, svint64_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightArithmeticForDivide : Arithmetic shift right for divide by immediate + + /// + /// svint8_t svasrd[_n_s8]_m(svbool_t pg, svint8_t op1, uint64_t imm2) + /// svint8_t svasrd[_n_s8]_x(svbool_t pg, svint8_t op1, uint64_t imm2) + /// svint8_t svasrd[_n_s8]_z(svbool_t pg, svint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticForDivide(Vector value, [ConstantExpected] byte control) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svasrd[_n_s16]_m(svbool_t pg, svint16_t op1, uint64_t imm2) + /// svint16_t svasrd[_n_s16]_x(svbool_t pg, svint16_t op1, uint64_t imm2) + /// svint16_t svasrd[_n_s16]_z(svbool_t pg, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticForDivide(Vector value, [ConstantExpected] byte control) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svasrd[_n_s32]_m(svbool_t pg, svint32_t op1, uint64_t imm2) + /// svint32_t svasrd[_n_s32]_x(svbool_t pg, svint32_t op1, uint64_t imm2) + /// svint32_t svasrd[_n_s32]_z(svbool_t pg, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticForDivide(Vector value, [ConstantExpected] byte control) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svasrd[_n_s64]_m(svbool_t pg, svint64_t op1, uint64_t imm2) + /// svint64_t svasrd[_n_s64]_x(svbool_t pg, svint64_t op1, uint64_t imm2) + /// svint64_t 
svasrd[_n_s64]_z(svbool_t pg, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticForDivide(Vector value, [ConstantExpected] byte control) { throw new PlatformNotSupportedException(); }
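+
+ // Example (illustrative): unlike a plain arithmetic shift, svasrd rounds toward
+ // zero, matching signed division by a power of two: a lane holding -9 shifted
+ // by 2 produces -2 (-9 / 4), where ShiftRightArithmetic would produce -3:
+ //   Vector<int> quotients = Sve.ShiftRightArithmeticForDivide(values, 2);
+
+ + + /// ShiftRightLogical : Logical shift right + + /// + /// svuint8_t svlsr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svlsr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svlsr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svlsr_wide[_u8]_m(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// svuint8_t svlsr_wide[_u8]_x(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// svuint8_t svlsr_wide[_u8]_z(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svlsr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svlsr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svlsr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svlsr_wide[_u16]_m(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// svuint16_t svlsr_wide[_u16]_x(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// svuint16_t svlsr_wide[_u16]_z(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svlsr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svlsr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svlsr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svlsr_wide[_u32]_m(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// svuint32_t svlsr_wide[_u32]_x(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// svuint32_t svlsr_wide[_u32]_z(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svlsr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svlsr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svlsr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// SignExtend16 : Sign-extend the low 16 bits + + /// + /// svint32_t svexth[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// svint32_t svexth[_s32]_x(svbool_t pg, svint32_t op) + /// svint32_t svexth[_s32]_z(svbool_t pg, svint32_t op) + /// + public static unsafe Vector SignExtend16(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svexth[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// svint64_t svexth[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svexth[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector SignExtend16(Vector value) { throw new PlatformNotSupportedException(); } + + + /// SignExtend32 :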
Sign-extend the low 32 bits + + /// + /// svint64_t svextw[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// svint64_t svextw[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svextw[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector SignExtend32(Vector value) { throw new PlatformNotSupportedException(); } + + + /// SignExtend8 : Sign-extend the low 8 bits + + /// + /// svint16_t svextb[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// svint16_t svextb[_s16]_x(svbool_t pg, svint16_t op) + /// svint16_t svextb[_s16]_z(svbool_t pg, svint16_t op) + /// + public static unsafe Vector SignExtend8(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svextb[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// svint32_t svextb[_s32]_x(svbool_t pg, svint32_t op) + /// svint32_t svextb[_s32]_z(svbool_t pg, svint32_t op) + /// + public static unsafe Vector SignExtend8(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svextb[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// svint64_t svextb[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svextb[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector SignExtend8(Vector value) { throw new PlatformNotSupportedException(); } + + + /// SignExtendWideningLower : Unpack and extend low half + + /// + /// svint16_t svunpklo[_s16](svint8_t op) + /// + public static unsafe Vector SignExtendWideningLower(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svunpklo[_s32](svint16_t op) + /// + public static unsafe Vector SignExtendWideningLower(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svunpklo[_s64](svint32_t op) + /// + public static unsafe Vector SignExtendWideningLower(Vector value) { throw new PlatformNotSupportedException(); } + + + /// SignExtendWideningUpper : Unpack and extend high half + + /// + /// svint16_t svunpkhi[_s16](svint8_t op) + /// + public static unsafe Vector SignExtendWideningUpper(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svunpkhi[_s32](svint16_t op) + /// + public static unsafe Vector SignExtendWideningUpper(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svunpkhi[_s64](svint32_t op) + /// + public static unsafe Vector SignExtendWideningUpper(Vector value) { throw new PlatformNotSupportedException(); }
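+
+ // Example (illustrative): the widening pair splits one vector of short lanes
+ // into two int vectors, sign-extending the low half and then the high half:
+ //   Vector<int> lo = Sve.SignExtendWideningLower(shorts);
+ //   Vector<int> hi = Sve.SignExtendWideningUpper(shorts);
+
+ + + /// Splice : Splice two vectors under predicate control + + /// + /// svint8_t svsplice[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svsplice[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svsplice[_s32](svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svsplice[_s64](svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svsplice[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left,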
Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svsplice[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svsplice[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svsplice[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svsplice[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svsplice[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// Sqrt : Square root + + /// + /// svfloat32_t svsqrt[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// svfloat32_t svsqrt[_f32]_x(svbool_t pg, svfloat32_t op) + /// svfloat32_t svsqrt[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector Sqrt(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svsqrt[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// svfloat64_t svsqrt[_f64]_x(svbool_t pg, svfloat64_t op) + /// svfloat64_t svsqrt[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector Sqrt(Vector value) { throw new PlatformNotSupportedException(); } + + + /// Store : Non-truncating store + + /// + /// void svst1[_s8](svbool_t pg, int8_t *base, svint8_t data) + /// + public static unsafe void Store(Vector mask, sbyte* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst2[_s8](svbool_t pg, int8_t *base, svint8x2_t data) + /// + public static unsafe void Store(Vector mask, sbyte* address, (Vector Value1, Vector Value2) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst3[_s8](svbool_t pg, int8_t *base, svint8x3_t data) + /// + public static unsafe void Store(Vector mask, sbyte* address, (Vector Value1, Vector Value2, Vector Value3) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst4[_s8](svbool_t pg, int8_t *base, svint8x4_t data) + /// + public static unsafe void Store(Vector mask, sbyte* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1[_s16](svbool_t pg, int16_t *base, svint16_t data) + /// + public static unsafe void Store(Vector mask, short* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst2[_s16](svbool_t pg, int16_t *base, svint16x2_t data) + /// + public static unsafe void Store(Vector mask, short* address, (Vector Value1, Vector Value2) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst3[_s16](svbool_t pg, int16_t *base, svint16x3_t data) + /// + public static unsafe void Store(Vector mask, short* address, (Vector Value1, Vector Value2, Vector Value3) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst4[_s16](svbool_t pg, int16_t *base, svint16x4_t data) + /// + public static 
unsafe void Store(Vector mask, short* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1[_s32](svbool_t pg, int32_t *base, svint32_t data) + /// + public static unsafe void Store(Vector mask, int* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst2[_s32](svbool_t pg, int32_t *base, svint32x2_t data) + /// + public static unsafe void Store(Vector mask, int* address, (Vector Value1, Vector Value2) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst3[_s32](svbool_t pg, int32_t *base, svint32x3_t data) + /// + public static unsafe void Store(Vector mask, int* address, (Vector Value1, Vector Value2, Vector Value3) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst4[_s32](svbool_t pg, int32_t *base, svint32x4_t data) + /// + public static unsafe void Store(Vector mask, int* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1[_s64](svbool_t pg, int64_t *base, svint64_t data) + /// + public static unsafe void Store(Vector mask, long* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst2[_s64](svbool_t pg, int64_t *base, svint64x2_t data) + /// + public static unsafe void Store(Vector mask, long* address, (Vector Value1, Vector Value2) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst3[_s64](svbool_t pg, int64_t *base, svint64x3_t data) + /// + public static unsafe void Store(Vector mask, long* address, (Vector Value1, Vector Value2, Vector Value3) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst4[_s64](svbool_t pg, int64_t *base, svint64x4_t data) + /// + public static unsafe void Store(Vector mask, long* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1[_u8](svbool_t pg, uint8_t *base, svuint8_t data) + /// + public static unsafe void Store(Vector mask, byte* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst2[_u8](svbool_t pg, uint8_t *base, svuint8x2_t data) + /// + public static unsafe void Store(Vector mask, byte* address, (Vector Value1, Vector Value2) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst3[_u8](svbool_t pg, uint8_t *base, svuint8x3_t data) + /// + public static unsafe void Store(Vector mask, byte* address, (Vector Value1, Vector Value2, Vector Value3) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst4[_u8](svbool_t pg, uint8_t *base, svuint8x4_t data) + /// + public static unsafe void Store(Vector mask, byte* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1[_u16](svbool_t pg, uint16_t *base, svuint16_t data) + /// + public static unsafe void Store(Vector mask, ushort* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst2[_u16](svbool_t pg, uint16_t *base, svuint16x2_t data) + /// + public static unsafe void Store(Vector mask, ushort* address, (Vector Value1, Vector Value2) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst3[_u16](svbool_t pg, uint16_t *base, svuint16x3_t data) + /// + public static unsafe void Store(Vector mask, ushort* address, (Vector 
Value1, Vector Value2, Vector Value3) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst4[_u16](svbool_t pg, uint16_t *base, svuint16x4_t data) + /// + public static unsafe void Store(Vector mask, ushort* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1[_u32](svbool_t pg, uint32_t *base, svuint32_t data) + /// + public static unsafe void Store(Vector mask, uint* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst2[_u32](svbool_t pg, uint32_t *base, svuint32x2_t data) + /// + public static unsafe void Store(Vector mask, uint* address, (Vector Value1, Vector Value2) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst3[_u32](svbool_t pg, uint32_t *base, svuint32x3_t data) + /// + public static unsafe void Store(Vector mask, uint* address, (Vector Value1, Vector Value2, Vector Value3) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst4[_u32](svbool_t pg, uint32_t *base, svuint32x4_t data) + /// + public static unsafe void Store(Vector mask, uint* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1[_u64](svbool_t pg, uint64_t *base, svuint64_t data) + /// + public static unsafe void Store(Vector mask, ulong* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst2[_u64](svbool_t pg, uint64_t *base, svuint64x2_t data) + /// + public static unsafe void Store(Vector mask, ulong* address, (Vector Value1, Vector Value2) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst3[_u64](svbool_t pg, uint64_t *base, svuint64x3_t data) + /// + public static unsafe void Store(Vector mask, ulong* address, (Vector Value1, Vector Value2, Vector Value3) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst4[_u64](svbool_t pg, uint64_t *base, svuint64x4_t data) + /// + public static unsafe void Store(Vector mask, ulong* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1[_f32](svbool_t pg, float32_t *base, svfloat32_t data) + /// + public static unsafe void Store(Vector mask, float* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst2[_f32](svbool_t pg, float32_t *base, svfloat32x2_t data) + /// + public static unsafe void Store(Vector mask, float* address, (Vector Value1, Vector Value2) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst3[_f32](svbool_t pg, float32_t *base, svfloat32x3_t data) + /// + public static unsafe void Store(Vector mask, float* address, (Vector Value1, Vector Value2, Vector Value3) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst4[_f32](svbool_t pg, float32_t *base, svfloat32x4_t data) + /// + public static unsafe void Store(Vector mask, float* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1[_f64](svbool_t pg, float64_t *base, svfloat64_t data) + /// + public static unsafe void Store(Vector mask, double* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst2[_f64](svbool_t pg, float64_t *base, svfloat64x2_t data) + /// + public static unsafe void Store(Vector mask, double* address, (Vector Value1, Vector Value2) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst3[_f64](svbool_t pg, float64_t *base, svfloat64x3_t data) + /// + public static unsafe void Store(Vector mask, double* address, (Vector Value1, Vector Value2, Vector Value3) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst4[_f64](svbool_t pg, float64_t *base, svfloat64x4_t data) + /// + public static unsafe void Store(Vector mask, double* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) { throw new PlatformNotSupportedException(); }
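+
+ // Example (illustrative): the tuple overloads map to svst2-svst4 and store the
+ // vectors as interleaved structure elements; this sketch writes two double
+ // vectors under a mask (variables hypothetical):
+ //   Sve.Store(mask, ptr, (v0, v1));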
+ + + /// StoreNarrowing : Truncate to a narrower element type and store + + /// + /// void svst1b[_s16](svbool_t pg, int8_t *base, svint16_t data) + /// + public static unsafe void StoreNarrowing(Vector mask, sbyte* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1b[_s32](svbool_t pg, int8_t *base, svint32_t data) + /// + public static unsafe void StoreNarrowing(Vector mask, sbyte* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h[_s32](svbool_t pg, int16_t *base, svint32_t data) + /// + public static unsafe void StoreNarrowing(Vector mask, short* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1b[_s64](svbool_t pg, int8_t *base, svint64_t data) + /// + public static unsafe void StoreNarrowing(Vector mask, sbyte* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h[_s64](svbool_t pg, int16_t *base, svint64_t data) + /// + public static unsafe void StoreNarrowing(Vector mask, short* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1w[_s64](svbool_t pg, int32_t *base, svint64_t data) + /// + public static unsafe void StoreNarrowing(Vector mask, int* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1b[_u16](svbool_t pg, uint8_t *base, svuint16_t data) + /// + public static unsafe void StoreNarrowing(Vector mask, byte* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1b[_u32](svbool_t pg, uint8_t *base, svuint32_t data) + /// + public static unsafe void StoreNarrowing(Vector mask, byte* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h[_u32](svbool_t pg, uint16_t *base, svuint32_t data) + /// + public static unsafe void StoreNarrowing(Vector mask, ushort* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1b[_u64](svbool_t pg, uint8_t *base, svuint64_t data) + /// + public static unsafe void StoreNarrowing(Vector mask, byte* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1h[_u64](svbool_t pg, uint16_t *base, svuint64_t data) + /// + public static unsafe void StoreNarrowing(Vector mask, ushort* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst1w[_u64](svbool_t pg, uint32_t *base, svuint64_t data) + /// + public static unsafe void StoreNarrowing(Vector mask, uint* address, Vector data) { throw new PlatformNotSupportedException(); }
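+
+ // Example (illustrative): StoreNarrowing truncates each lane on the way out,
+ // e.g. writing a vector of int lanes through a short* keeps only the low
+ // 16 bits of each element (variables hypothetical):
+ //   Sve.StoreNarrowing(mask, shortPtr, intVector);
+
+ + + /// StoreNonTemporal : Non-truncating store, non-temporal + + /// + /// void svstnt1[_s8](svbool_t pg, int8_t *base, svint8_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, sbyte* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1[_s16](svbool_t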
pg, int16_t *base, svint16_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, short* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1[_s32](svbool_t pg, int32_t *base, svint32_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, int* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1[_s64](svbool_t pg, int64_t *base, svint64_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, long* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1[_u8](svbool_t pg, uint8_t *base, svuint8_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, byte* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1[_u16](svbool_t pg, uint16_t *base, svuint16_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, ushort* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1[_u32](svbool_t pg, uint32_t *base, svuint32_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, uint* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1[_u64](svbool_t pg, uint64_t *base, svuint64_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, ulong* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1[_f32](svbool_t pg, float32_t *base, svfloat32_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, float* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1[_f64](svbool_t pg, float64_t *base, svfloat64_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, double* address, Vector data) { throw new PlatformNotSupportedException(); }
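+
+ // Example (illustrative): the non-temporal form hints that the stored data will
+ // not be reused soon, so streaming writes need not displace useful cache lines
+ // (variables hypothetical):
+ //   Sve.StoreNonTemporal(mask, outPtr, block);
+
+ + + /// Subtract : Subtract + + /// + /// svint8_t svsub[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svsub[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svsub[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector Subtract(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svsub[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svsub[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svsub[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector Subtract(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svsub[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svsub[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svsub[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector Subtract(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svsub[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svsub[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svsub[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector Subtract(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svsub[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svsub[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svsub[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// +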
+
+
+ /// Subtract : Subtract
+
+ /// <summary>
+ /// svint8_t svsub[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svsub[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svsub[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> Subtract(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svsub[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svsub[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svsub[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> Subtract(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svsub[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svsub[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svsub[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> Subtract(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svsub[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svsub[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svsub[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> Subtract(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svsub[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svsub[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svsub[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> Subtract(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svsub[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svsub[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svsub[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> Subtract(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svsub[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svsub[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svsub[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> Subtract(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svsub[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svsub[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svsub[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> Subtract(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat32_t svsub[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// svfloat32_t svsub[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// svfloat32_t svsub[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> Subtract(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svsub[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// svfloat64_t svsub[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// svfloat64_t svsub[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> Subtract(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+
+ /// SubtractSaturate : Saturating subtract
+
+ /// <summary>
+ /// svint8_t svqsub[_s8](svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> SubtractSaturate(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svqsub[_s16](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> SubtractSaturate(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svqsub[_s32](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> SubtractSaturate(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svqsub[_s64](svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> SubtractSaturate(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svqsub[_u8](svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> SubtractSaturate(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svqsub[_u16](svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> SubtractSaturate(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svqsub[_u32](svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> SubtractSaturate(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svqsub[_u64](svuint64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> SubtractSaturate(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
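Subtract is the plain wrapping subtraction (svsub), while SubtractSaturate (svqsub) clamps to the element type's range instead of wrapping. A small sketch of the difference:

    // With a = -128 in some sbyte lane and b = 1:
    Vector<sbyte> wrapped = Sve.Subtract(a, b);          // that lane wraps to 127
    Vector<sbyte> clamped = Sve.SubtractSaturate(a, b);  // that lane saturates at -128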
+
+
+ /// TestAnyTrue : Test whether any active element is true
+
+ /// <summary>
+ /// bool svptest_any(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestAnyTrue(Vector<sbyte> leftMask, Vector<sbyte> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_any(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestAnyTrue(Vector<short> leftMask, Vector<short> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_any(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestAnyTrue(Vector<int> leftMask, Vector<int> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_any(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestAnyTrue(Vector<long> leftMask, Vector<long> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_any(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestAnyTrue(Vector<byte> leftMask, Vector<byte> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_any(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestAnyTrue(Vector<ushort> leftMask, Vector<ushort> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_any(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestAnyTrue(Vector<uint> leftMask, Vector<uint> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_any(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestAnyTrue(Vector<ulong> leftMask, Vector<ulong> rightMask) { throw new PlatformNotSupportedException(); }
+
+
+ /// TestFirstTrue : Test whether the first active element is true
+
+ /// <summary>
+ /// bool svptest_first(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestFirstTrue(Vector<sbyte> leftMask, Vector<sbyte> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_first(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestFirstTrue(Vector<short> leftMask, Vector<short> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_first(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestFirstTrue(Vector<int> leftMask, Vector<int> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_first(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestFirstTrue(Vector<long> leftMask, Vector<long> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_first(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestFirstTrue(Vector<byte> leftMask, Vector<byte> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_first(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestFirstTrue(Vector<ushort> leftMask, Vector<ushort> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_first(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestFirstTrue(Vector<uint> leftMask, Vector<uint> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_first(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestFirstTrue(Vector<ulong> leftMask, Vector<ulong> rightMask) { throw new PlatformNotSupportedException(); }
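The svptest family reduces a pair of predicates to a single flag: TestAnyTrue asks whether any lane active in the first mask is set in the second, and TestFirstTrue asks the same of the first active lane only. A sketch, assuming CompareGreaterThan and CreateTrueMaskInt32 from the same API surface:

    static bool AnyAboveLimit(Vector<int> data, Vector<int> limit)
    {
        Vector<int> all = Sve.CreateTrueMaskInt32();            // governing predicate
        Vector<int> hits = Sve.CompareGreaterThan(data, limit); // per-lane result mask
        return Sve.TestAnyTrue(all, hits);                      // bool svptest_any
    }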
+
+
+ /// TestLastTrue : Test whether the last active element is true
+
+ /// <summary>
+ /// bool svptest_last(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestLastTrue(Vector<sbyte> leftMask, Vector<sbyte> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_last(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestLastTrue(Vector<short> leftMask, Vector<short> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_last(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestLastTrue(Vector<int> leftMask, Vector<int> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_last(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestLastTrue(Vector<long> leftMask, Vector<long> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_last(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestLastTrue(Vector<byte> leftMask, Vector<byte> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_last(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestLastTrue(Vector<ushort> leftMask, Vector<ushort> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_last(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestLastTrue(Vector<uint> leftMask, Vector<uint> rightMask) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// bool svptest_last(svbool_t pg, svbool_t op)
+ /// </summary>
+ public static unsafe bool TestLastTrue(Vector<ulong> leftMask, Vector<ulong> rightMask) { throw new PlatformNotSupportedException(); }
+
+
+ /// TransposeEven : Interleave even elements from two inputs
+
+ /// <summary>
+ /// svint8_t svtrn1[_s8](svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> TransposeEven(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svtrn1[_s16](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> TransposeEven(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svtrn1[_s32](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> TransposeEven(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svtrn1[_s64](svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> TransposeEven(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svtrn1[_u8](svuint8_t op1, svuint8_t op2)
+ /// svbool_t svtrn1_b8(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> TransposeEven(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svtrn1[_u16](svuint16_t op1, svuint16_t op2)
+ /// svbool_t svtrn1_b16(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> TransposeEven(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svtrn1[_u32](svuint32_t op1, svuint32_t op2)
+ /// svbool_t svtrn1_b32(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> TransposeEven(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svtrn1[_u64](svuint64_t op1, svuint64_t op2)
+ /// svbool_t svtrn1_b64(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> TransposeEven(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat32_t svtrn1[_f32](svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> TransposeEven(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svtrn1[_f64](svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> TransposeEven(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
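TransposeEven/TransposeOdd map to TRN1/TRN2, which build a result from alternating lanes of the two inputs. Illustrative 4-lane behaviour (TransposeOdd is declared just below):

    // left  = { a0, a1, a2, a3 },  right = { b0, b1, b2, b3 }
    // TransposeEven(left, right) -> { a0, b0, a2, b2 }  // even-indexed lanes
    // TransposeOdd(left, right)  -> { a1, b1, a3, b3 }  // odd-indexed lanes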
+
+
+ /// TransposeOdd : Interleave odd elements from two inputs
+
+ /// <summary>
+ /// svint8_t svtrn2[_s8](svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> TransposeOdd(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svtrn2[_s16](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> TransposeOdd(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svtrn2[_s32](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> TransposeOdd(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svtrn2[_s64](svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> TransposeOdd(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svtrn2[_u8](svuint8_t op1, svuint8_t op2)
+ /// svbool_t svtrn2_b8(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> TransposeOdd(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svtrn2[_u16](svuint16_t op1, svuint16_t op2)
+ /// svbool_t svtrn2_b16(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> TransposeOdd(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svtrn2[_u32](svuint32_t op1, svuint32_t op2)
+ /// svbool_t svtrn2_b32(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> TransposeOdd(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svtrn2[_u64](svuint64_t op1, svuint64_t op2)
+ /// svbool_t svtrn2_b64(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> TransposeOdd(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat32_t svtrn2[_f32](svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> TransposeOdd(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svtrn2[_f64](svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> TransposeOdd(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+
+ /// TrigonometricMultiplyAddCoefficient : Trigonometric multiply-add coefficient
+
+ /// <summary>
+ /// svfloat32_t svtmad[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm3)
+ /// </summary>
+ public static unsafe Vector<float> TrigonometricMultiplyAddCoefficient(Vector<float> left, Vector<float> right, [ConstantExpected] byte control) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svtmad[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm3)
+ /// </summary>
+ public static unsafe Vector<double> TrigonometricMultiplyAddCoefficient(Vector<double> left, Vector<double> right, [ConstantExpected] byte control) { throw new PlatformNotSupportedException(); }
+
+
+ /// TrigonometricSelectCoefficient : Trigonometric select coefficient
+
+ /// <summary>
+ /// svfloat32_t svtssel[_f32](svfloat32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> TrigonometricSelectCoefficient(Vector<float> value, Vector<uint> selector) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svtssel[_f64](svfloat64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> TrigonometricSelectCoefficient(Vector<double> value, Vector<ulong> selector) { throw new PlatformNotSupportedException(); }
+
+
+ /// TrigonometricStartingValue : Trigonometric starting value
+
+ /// <summary>
+ /// svfloat32_t svtsmul[_f32](svfloat32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> TrigonometricStartingValue(Vector<float> value, Vector<uint> sign) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svtsmul[_f64](svfloat64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> TrigonometricStartingValue(Vector<double> value, Vector<ulong> sign) { throw new PlatformNotSupportedException(); }
+
+
+ /// UnzipEven : Concatenate even elements from two inputs
+
+ /// <summary>
+ /// svint8_t svuzp1[_s8](svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> UnzipEven(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svuzp1[_s16](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> UnzipEven(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svuzp1[_s32](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> UnzipEven(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svuzp1[_s64](svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> UnzipEven(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svuzp1[_u8](svuint8_t op1, svuint8_t op2)
+ /// svbool_t svuzp1_b8(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> UnzipEven(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svuzp1[_u16](svuint16_t op1, svuint16_t op2)
+ /// svbool_t svuzp1_b16(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> UnzipEven(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svuzp1[_u32](svuint32_t op1, svuint32_t op2)
+ /// svbool_t svuzp1_b32(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> UnzipEven(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svuzp1[_u64](svuint64_t op1, svuint64_t op2)
+ /// svbool_t svuzp1_b64(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> UnzipEven(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat32_t svuzp1[_f32](svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> UnzipEven(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svuzp1[_f64](svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> UnzipEven(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+
+ /// UnzipOdd : Concatenate odd elements from two inputs
+
+ /// <summary>
+ /// svint8_t svuzp2[_s8](svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> UnzipOdd(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svuzp2[_s16](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> UnzipOdd(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svuzp2[_s32](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> UnzipOdd(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svuzp2[_s64](svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> UnzipOdd(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svuzp2[_u8](svuint8_t op1, svuint8_t op2)
+ /// svbool_t svuzp2_b8(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> UnzipOdd(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svuzp2[_u16](svuint16_t op1, svuint16_t op2)
+ /// svbool_t svuzp2_b16(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> UnzipOdd(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svuzp2[_u32](svuint32_t op1, svuint32_t op2)
+ /// svbool_t svuzp2_b32(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> UnzipOdd(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svuzp2[_u64](svuint64_t op1, svuint64_t op2)
+ /// svbool_t svuzp2_b64(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> UnzipOdd(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat32_t svuzp2[_f32](svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> UnzipOdd(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svuzp2[_f64](svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> UnzipOdd(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+
+ /// VectorTableLookup : Table lookup in single-vector table
+
+ /// <summary>
+ /// svint8_t svtbl[_s8](svint8_t data, svuint8_t indices)
+ /// </summary>
+ public static unsafe Vector<sbyte> VectorTableLookup(Vector<sbyte> data, Vector<byte> indices) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svtbl[_s16](svint16_t data, svuint16_t indices)
+ /// </summary>
+ public static unsafe Vector<short> VectorTableLookup(Vector<short> data, Vector<ushort> indices) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svtbl[_s32](svint32_t data, svuint32_t indices)
+ /// </summary>
+ public static unsafe Vector<int> VectorTableLookup(Vector<int> data, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svtbl[_s64](svint64_t data, svuint64_t indices)
+ /// </summary>
+ public static unsafe Vector<long> VectorTableLookup(Vector<long> data, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svtbl[_u8](svuint8_t data, svuint8_t indices)
+ /// </summary>
+ public static unsafe Vector<byte> VectorTableLookup(Vector<byte> data, Vector<byte> indices) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svtbl[_u16](svuint16_t data, svuint16_t indices)
+ /// </summary>
+ public static unsafe Vector<ushort> VectorTableLookup(Vector<ushort> data, Vector<ushort> indices) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svtbl[_u32](svuint32_t data, svuint32_t indices)
+ /// </summary>
+ public static unsafe Vector<uint> VectorTableLookup(Vector<uint> data, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svtbl[_u64](svuint64_t data, svuint64_t indices)
+ /// </summary>
+ public static unsafe Vector<ulong> VectorTableLookup(Vector<ulong> data, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat32_t svtbl[_f32](svfloat32_t data, svuint32_t indices)
+ /// </summary>
+ public static unsafe Vector<float> VectorTableLookup(Vector<float> data, Vector<uint> indices) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svtbl[_f64](svfloat64_t data, svuint64_t indices)
+ /// </summary>
+ public static unsafe Vector<double> VectorTableLookup(Vector<double> data, Vector<ulong> indices) { throw new PlatformNotSupportedException(); }
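VectorTableLookup is svtbl: each result lane is data[indices[i]], and an out-of-range index yields zero rather than faulting. A sketch of a lane reversal; the index-vector helper used here is hypothetical, standing in for whatever sequence constructor the final surface provides:

    static Vector<byte> ReverseLanes(Vector<byte> data)
    {
        byte n = (byte)Sve.Count8BitElements();       // lanes per vector
        // Hypothetical helper producing { n-1, n-2, ..., 0 }.
        Vector<byte> idx = CreateDescendingIndices(n);
        return Sve.VectorTableLookup(data, idx);      // svtbl[_u8]
    }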
+
+
+ /// Xor : Bitwise exclusive OR
+
+ /// <summary>
+ /// svint8_t sveor[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t sveor[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t sveor[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> Xor(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t sveor[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t sveor[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t sveor[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<short> Xor(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t sveor[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t sveor[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t sveor[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<int> Xor(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t sveor[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t sveor[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t sveor[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<long> Xor(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t sveor[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t sveor[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t sveor[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> Xor(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t sveor[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t sveor[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t sveor[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> Xor(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t sveor[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t sveor[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t sveor[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> Xor(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t sveor[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t sveor[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t sveor[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> Xor(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+
+ /// XorAcross : Bitwise exclusive OR reduction to scalar
+
+ /// <summary>
+ /// int8_t sveorv[_s8](svbool_t pg, svint8_t op)
+ /// </summary>
+ public static unsafe Vector<sbyte> XorAcross(Vector<sbyte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int16_t sveorv[_s16](svbool_t pg, svint16_t op)
+ /// </summary>
+ public static unsafe Vector<short> XorAcross(Vector<short> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int32_t sveorv[_s32](svbool_t pg, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<int> XorAcross(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// int64_t sveorv[_s64](svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> XorAcross(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint8_t sveorv[_u8](svbool_t pg, svuint8_t op)
+ /// </summary>
+ public static unsafe Vector<byte> XorAcross(Vector<byte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint16_t sveorv[_u16](svbool_t pg, svuint16_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> XorAcross(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint32_t sveorv[_u32](svbool_t pg, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> XorAcross(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// uint64_t sveorv[_u64](svbool_t pg, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> XorAcross(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+
+ /// ZeroExtend16 : Zero-extend the low 16 bits
+
+ /// <summary>
+ /// svuint32_t svexth[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ /// svuint32_t svexth[_u32]_x(svbool_t pg, svuint32_t op)
+ /// svuint32_t svexth[_u32]_z(svbool_t pg, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> ZeroExtend16(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svexth[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ /// svuint64_t svexth[_u64]_x(svbool_t pg, svuint64_t op)
+ /// svuint64_t svexth[_u64]_z(svbool_t pg, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> ZeroExtend16(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+
+ /// ZeroExtend32 : Zero-extend the low 32 bits
+
+ /// <summary>
+ /// svuint64_t svextw[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ /// svuint64_t svextw[_u64]_x(svbool_t pg, svuint64_t op)
+ /// svuint64_t svextw[_u64]_z(svbool_t pg, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> ZeroExtend32(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+
+ /// ZeroExtend8 : Zero-extend the low 8 bits
+
+ /// <summary>
+ /// svuint16_t svextb[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+ /// svuint16_t svextb[_u16]_x(svbool_t pg, svuint16_t op)
+ /// svuint16_t svextb[_u16]_z(svbool_t pg, svuint16_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> ZeroExtend8(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svextb[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ /// svuint32_t svextb[_u32]_x(svbool_t pg, svuint32_t op)
+ /// svuint32_t svextb[_u32]_z(svbool_t pg, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> ZeroExtend8(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svextb[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ /// svuint64_t svextb[_u64]_x(svbool_t pg, svuint64_t op)
+ /// svuint64_t svextb[_u64]_z(svbool_t pg, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> ZeroExtend8(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+
+
+ /// ZeroExtendWideningLower : Unpack and extend low half
+
+ /// <summary>
+ /// svuint16_t svunpklo[_u16](svuint8_t op)
+ /// svbool_t svunpklo[_b](svbool_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> ZeroExtendWideningLower(Vector<byte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svunpklo[_u32](svuint16_t op)
+ /// svbool_t svunpklo[_b](svbool_t op)
+ /// </summary>
+ public static unsafe Vector<uint> ZeroExtendWideningLower(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svunpklo[_u64](svuint32_t op)
+ /// svbool_t svunpklo[_b](svbool_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> ZeroExtendWideningLower(Vector<uint> value) { throw new PlatformNotSupportedException(); }
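The widening forms unpack one half of the source lanes into elements of twice the width, so a Lower/Upper pair (ZeroExtendWideningUpper is declared just below) covers a full vector. A sketch, assuming an SVE-enabled runtime:

    static void WidenBytes(Vector<byte> src, out Vector<ushort> lo, out Vector<ushort> hi)
    {
        lo = Sve.ZeroExtendWideningLower(src);  // svunpklo: low half, zero-extended
        hi = Sve.ZeroExtendWideningUpper(src);  // svunpkhi: high half, zero-extended
    }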
+
+
+ /// ZeroExtendWideningUpper : Unpack and extend high half
+
+ /// <summary>
+ /// svuint16_t svunpkhi[_u16](svuint8_t op)
+ /// svbool_t svunpkhi[_b](svbool_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> ZeroExtendWideningUpper(Vector<byte> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svunpkhi[_u32](svuint16_t op)
+ /// svbool_t svunpkhi[_b](svbool_t op)
+ /// </summary>
+ public static unsafe Vector<uint> ZeroExtendWideningUpper(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svunpkhi[_u64](svuint32_t op)
+ /// svbool_t svunpkhi[_b](svbool_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> ZeroExtendWideningUpper(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+
+ /// ZipHigh : Interleave elements from high halves of two inputs
+
+ /// <summary>
+ /// svint8_t svzip2[_s8](svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> ZipHigh(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svzip2[_s16](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> ZipHigh(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svzip2[_s32](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> ZipHigh(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svzip2[_s64](svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> ZipHigh(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svzip2[_u8](svuint8_t op1, svuint8_t op2)
+ /// svbool_t svzip2_b8(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> ZipHigh(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svzip2[_u16](svuint16_t op1, svuint16_t op2)
+ /// svbool_t svzip2_b16(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> ZipHigh(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svzip2[_u32](svuint32_t op1, svuint32_t op2)
+ /// svbool_t svzip2_b32(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> ZipHigh(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svzip2[_u64](svuint64_t op1, svuint64_t op2)
+ /// svbool_t svzip2_b64(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> ZipHigh(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat32_t svzip2[_f32](svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> ZipHigh(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svzip2[_f64](svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> ZipHigh(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
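ZipHigh/ZipLow map to ZIP2/ZIP1, interleaving lanes from the high or low halves of the two inputs (ZipLow is declared just below). Illustrative 4-lane behaviour:

    // left  = { a0, a1, a2, a3 },  right = { b0, b1, b2, b3 }
    // ZipLow(left, right)  -> { a0, b0, a1, b1 }
    // ZipHigh(left, right) -> { a2, b2, a3, b3 }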
+
+
+ /// ZipLow : Interleave elements from low halves of two inputs
+
+ /// <summary>
+ /// svint8_t svzip1[_s8](svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> ZipLow(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint16_t svzip1[_s16](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> ZipLow(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint32_t svzip1[_s32](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> ZipLow(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svint64_t svzip1[_s64](svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> ZipLow(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint8_t svzip1[_u8](svuint8_t op1, svuint8_t op2)
+ /// svbool_t svzip1_b8(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> ZipLow(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint16_t svzip1[_u16](svuint16_t op1, svuint16_t op2)
+ /// svbool_t svzip1_b16(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> ZipLow(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint32_t svzip1[_u32](svuint32_t op1, svuint32_t op2)
+ /// svbool_t svzip1_b32(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> ZipLow(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svuint64_t svzip1[_u64](svuint64_t op1, svuint64_t op2)
+ /// svbool_t svzip1_b64(svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> ZipLow(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat32_t svzip1[_f32](svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> ZipLow(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); }
+
+ /// <summary>
+ /// svfloat64_t svzip1[_f64](svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> ZipLow(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); }
+
+ }
+}
+
diff --git a/sve_api/out_cs_api/Sve.System.Runtime.Intrinsics.cs b/sve_api/out_cs_api/Sve.System.Runtime.Intrinsics.cs
new file mode 100644
index 0000000000000..b082c76fe686d
--- /dev/null
+++ b/sve_api/out_cs_api/Sve.System.Runtime.Intrinsics.cs
@@ -0,0 +1,1608 @@
+ public static System.Numerics.Vector<sbyte> Abs(System.Numerics.Vector<sbyte> value) { throw null; }
+ public static System.Numerics.Vector<short> Abs(System.Numerics.Vector<short> value) { throw null; }
+ public static System.Numerics.Vector<int> Abs(System.Numerics.Vector<int> value) { throw null; }
+ public static System.Numerics.Vector<long> Abs(System.Numerics.Vector<long> value) { throw null; }
+ public static System.Numerics.Vector<float> Abs(System.Numerics.Vector<float> value) { throw null; }
+ public static System.Numerics.Vector<double> Abs(System.Numerics.Vector<double> value) { throw null; }
+ public static System.Numerics.Vector<float> AbsoluteCompareGreaterThan(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> AbsoluteCompareGreaterThan(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<float> AbsoluteCompareGreaterThanOrEqual(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> AbsoluteCompareGreaterThanOrEqual(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<float> AbsoluteCompareLessThan(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> AbsoluteCompareLessThan(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<float> AbsoluteCompareLessThanOrEqual(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> AbsoluteCompareLessThanOrEqual(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> AbsoluteDifference(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> AbsoluteDifference(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> AbsoluteDifference(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> AbsoluteDifference(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> AbsoluteDifference(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> AbsoluteDifference(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> AbsoluteDifference(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> AbsoluteDifference(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> AbsoluteDifference(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> AbsoluteDifference(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> Add(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> Add(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> Add(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> Add(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> Add(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> Add(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> Add(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> Add(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> Add(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> Add(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<long> AddAcross(System.Numerics.Vector<sbyte> value) { throw null; }
+ public static System.Numerics.Vector<long> AddAcross(System.Numerics.Vector<short> value) { throw null; }
+ public static System.Numerics.Vector<long> AddAcross(System.Numerics.Vector<int> value) { throw null; }
+ public static System.Numerics.Vector<long> AddAcross(System.Numerics.Vector<long> value) { throw null; }
+ public static System.Numerics.Vector<ulong> AddAcross(System.Numerics.Vector<byte> value) { throw null; }
+ public static System.Numerics.Vector<ulong> AddAcross(System.Numerics.Vector<ushort> value) { throw null; }
+ public static System.Numerics.Vector<ulong> AddAcross(System.Numerics.Vector<uint> value) { throw null; }
+ public static System.Numerics.Vector<ulong> AddAcross(System.Numerics.Vector<ulong> value) { throw null; }
+ public static System.Numerics.Vector<float> AddAcross(System.Numerics.Vector<float> value) { throw null; }
+ public static System.Numerics.Vector<double> AddAcross(System.Numerics.Vector<double> value) { throw null; }
+ public static System.Numerics.Vector<float> AddRotateComplex(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right, [ConstantExpected] byte rotation) { throw null; }
+ public static System.Numerics.Vector<double> AddRotateComplex(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right, [ConstantExpected] byte rotation) { throw null; }
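Note the return types of the integer AddAcross overloads: svaddv accumulates into a 64-bit scalar, so the reduction of a Vector<int> comes back in lane 0 of a Vector<long> (and unsigned inputs come back in a Vector<ulong>), as reconstructed above. A sketch:

    static long SumLanes(Vector<int> v)
    {
        Vector<long> total = Sve.AddAcross(v);  // sum of all lanes, held in element 0
        return total[0];
    }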
+ public static System.Numerics.Vector<sbyte> AddSaturate(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> AddSaturate(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> AddSaturate(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> AddSaturate(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> AddSaturate(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> AddSaturate(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> AddSaturate(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> AddSaturate(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> AddSequentialAcross(System.Numerics.Vector<float> initial, System.Numerics.Vector<float> value) { throw null; }
+ public static System.Numerics.Vector<double> AddSequentialAcross(System.Numerics.Vector<double> initial, System.Numerics.Vector<double> value) { throw null; }
+ public static System.Numerics.Vector<sbyte> And(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> And(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> And(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> And(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> And(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> And(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> And(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> And(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> AndAcross(System.Numerics.Vector<sbyte> value) { throw null; }
+ public static System.Numerics.Vector<short> AndAcross(System.Numerics.Vector<short> value) { throw null; }
+ public static System.Numerics.Vector<int> AndAcross(System.Numerics.Vector<int> value) { throw null; }
+ public static System.Numerics.Vector<long> AndAcross(System.Numerics.Vector<long> value) { throw null; }
+ public static System.Numerics.Vector<byte> AndAcross(System.Numerics.Vector<byte> value) { throw null; }
+ public static System.Numerics.Vector<ushort> AndAcross(System.Numerics.Vector<ushort> value) { throw null; }
+ public static System.Numerics.Vector<uint> AndAcross(System.Numerics.Vector<uint> value) { throw null; }
+ public static System.Numerics.Vector<ulong> AndAcross(System.Numerics.Vector<ulong> value) { throw null; }
+ public static System.Numerics.Vector<sbyte> AndNot(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> AndNot(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> AndNot(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> AndNot(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> AndNot(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> AndNot(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> AndNot(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> AndNot(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> BitwiseClear(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> BitwiseClear(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> BitwiseClear(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> BitwiseClear(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> BitwiseClear(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> BitwiseClear(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> BitwiseClear(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> BitwiseClear(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> BooleanNot(System.Numerics.Vector<sbyte> value) { throw null; }
+ public static System.Numerics.Vector<short> BooleanNot(System.Numerics.Vector<short> value) { throw null; }
+ public static System.Numerics.Vector<int> BooleanNot(System.Numerics.Vector<int> value) { throw null; }
+ public static System.Numerics.Vector<long> BooleanNot(System.Numerics.Vector<long> value) { throw null; }
+ public static System.Numerics.Vector<byte> BooleanNot(System.Numerics.Vector<byte> value) { throw null; }
+ public static System.Numerics.Vector<ushort> BooleanNot(System.Numerics.Vector<ushort> value) { throw null; }
+ public static System.Numerics.Vector<uint> BooleanNot(System.Numerics.Vector<uint> value) { throw null; }
+ public static System.Numerics.Vector<ulong> BooleanNot(System.Numerics.Vector<ulong> value) { throw null; }
+ public static System.Numerics.Vector<int> Compact(System.Numerics.Vector<int> mask, System.Numerics.Vector<int> value) { throw null; }
+ public static System.Numerics.Vector<long> Compact(System.Numerics.Vector<long> mask, System.Numerics.Vector<long> value) { throw null; }
+ public static System.Numerics.Vector<uint> Compact(System.Numerics.Vector<uint> mask, System.Numerics.Vector<uint> value) { throw null; }
+ public static System.Numerics.Vector<ulong> Compact(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<ulong> value) { throw null; }
+ public static System.Numerics.Vector<float> Compact(System.Numerics.Vector<float> mask, System.Numerics.Vector<float> value) { throw null; }
+ public static System.Numerics.Vector<double> Compact(System.Numerics.Vector<double> mask, System.Numerics.Vector<double> value) { throw null; }
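Compact (svcompact) packs the active lanes of value to the front of the result and zero-fills the remainder, which is why it is only defined for 32- and 64-bit element types (six overloads above). A sketch, filtering positive lanes:

    static Vector<int> PackPositives(Vector<int> v)
    {
        Vector<int> mask = Sve.CompareGreaterThan(v, Vector<int>.Zero);
        return Sve.Compact(mask, v);  // active lanes first, zeros after
    }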
+ public static System.Numerics.Vector<sbyte> CompareEqual(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> CompareEqual(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<short> CompareEqual(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<short> CompareEqual(System.Numerics.Vector<short> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<int> CompareEqual(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<int> CompareEqual(System.Numerics.Vector<int> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<long> CompareEqual(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> CompareEqual(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> CompareEqual(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> CompareEqual(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> CompareEqual(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> CompareEqual(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> CompareEqual(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> CompareGreaterThan(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> CompareGreaterThan(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<short> CompareGreaterThan(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<short> CompareGreaterThan(System.Numerics.Vector<short> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<int> CompareGreaterThan(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<int> CompareGreaterThan(System.Numerics.Vector<int> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<long> CompareGreaterThan(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> CompareGreaterThan(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<byte> CompareGreaterThan(System.Numerics.Vector<byte> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<ushort> CompareGreaterThan(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<ushort> CompareGreaterThan(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<uint> CompareGreaterThan(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<uint> CompareGreaterThan(System.Numerics.Vector<uint> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<ulong> CompareGreaterThan(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> CompareGreaterThan(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> CompareGreaterThan(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> CompareGreaterThanOrEqual(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> CompareGreaterThanOrEqual(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<short> CompareGreaterThanOrEqual(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<short> CompareGreaterThanOrEqual(System.Numerics.Vector<short> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<int> CompareGreaterThanOrEqual(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<int> CompareGreaterThanOrEqual(System.Numerics.Vector<int> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<long> CompareGreaterThanOrEqual(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> CompareGreaterThanOrEqual(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<byte> CompareGreaterThanOrEqual(System.Numerics.Vector<byte> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<ushort> CompareGreaterThanOrEqual(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<ushort> CompareGreaterThanOrEqual(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<uint> CompareGreaterThanOrEqual(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<uint> CompareGreaterThanOrEqual(System.Numerics.Vector<uint> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<ulong> CompareGreaterThanOrEqual(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> CompareGreaterThanOrEqual(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> CompareGreaterThanOrEqual(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> CompareLessThan(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> CompareLessThan(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<short> CompareLessThan(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<short> CompareLessThan(System.Numerics.Vector<short> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<int> CompareLessThan(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<int> CompareLessThan(System.Numerics.Vector<int> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<long> CompareLessThan(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> CompareLessThan(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<byte> CompareLessThan(System.Numerics.Vector<byte> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<ushort> CompareLessThan(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<ushort> CompareLessThan(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<uint> CompareLessThan(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<uint> CompareLessThan(System.Numerics.Vector<uint> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<ulong> CompareLessThan(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> CompareLessThan(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> CompareLessThan(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> CompareLessThanOrEqual(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> CompareLessThanOrEqual(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<short> CompareLessThanOrEqual(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<short> CompareLessThanOrEqual(System.Numerics.Vector<short> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<int> CompareLessThanOrEqual(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<int> CompareLessThanOrEqual(System.Numerics.Vector<int> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<long> CompareLessThanOrEqual(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> CompareLessThanOrEqual(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<byte> CompareLessThanOrEqual(System.Numerics.Vector<byte> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<ushort> CompareLessThanOrEqual(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<ushort> CompareLessThanOrEqual(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<uint> CompareLessThanOrEqual(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<uint> CompareLessThanOrEqual(System.Numerics.Vector<uint> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<ulong> CompareLessThanOrEqual(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> CompareLessThanOrEqual(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> CompareLessThanOrEqual(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
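The sixteen-overload comparison groups include the "wide" forms (svcmp*_wide), where narrow lanes are compared against 64-bit elements of the right operand; the Vector<long>/Vector<ulong> right-hand parameters above reflect that pairing, which is inferred here from the overload counts. A sketch:

    // Each sbyte lane of 'values' is compared with the 64-bit element
    // of 'limits' that covers it (svcmplt_wide).
    static Vector<sbyte> BelowLimit(Vector<sbyte> values, Vector<long> limits)
        => Sve.CompareLessThan(values, limits);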
+ public static System.Numerics.Vector<sbyte> CompareNotEqualTo(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> CompareNotEqualTo(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<short> CompareNotEqualTo(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<short> CompareNotEqualTo(System.Numerics.Vector<short> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<int> CompareNotEqualTo(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<int> CompareNotEqualTo(System.Numerics.Vector<int> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<long> CompareNotEqualTo(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> CompareNotEqualTo(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> CompareNotEqualTo(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> CompareNotEqualTo(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> CompareNotEqualTo(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> CompareNotEqualTo(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> CompareNotEqualTo(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<float> CompareUnordered(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> CompareUnordered(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<uint> Compute16BitAddresses(System.Numerics.Vector<uint> bases, System.Numerics.Vector<int> indices) { throw null; }
+ public static System.Numerics.Vector<uint> Compute16BitAddresses(System.Numerics.Vector<uint> bases, System.Numerics.Vector<uint> indices) { throw null; }
+ public static System.Numerics.Vector<ulong> Compute16BitAddresses(System.Numerics.Vector<ulong> bases, System.Numerics.Vector<long> indices) { throw null; }
+ public static System.Numerics.Vector<ulong> Compute16BitAddresses(System.Numerics.Vector<ulong> bases, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static System.Numerics.Vector<uint> Compute32BitAddresses(System.Numerics.Vector<uint> bases, System.Numerics.Vector<int> indices) { throw null; }
+ public static System.Numerics.Vector<uint> Compute32BitAddresses(System.Numerics.Vector<uint> bases, System.Numerics.Vector<uint> indices) { throw null; }
+ public static System.Numerics.Vector<ulong> Compute32BitAddresses(System.Numerics.Vector<ulong> bases, System.Numerics.Vector<long> indices) { throw null; }
+ public static System.Numerics.Vector<ulong> Compute32BitAddresses(System.Numerics.Vector<ulong> bases, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static System.Numerics.Vector<uint> Compute64BitAddresses(System.Numerics.Vector<uint> bases, System.Numerics.Vector<int> indices) { throw null; }
+ public static System.Numerics.Vector<uint> Compute64BitAddresses(System.Numerics.Vector<uint> bases, System.Numerics.Vector<uint> indices) { throw null; }
+ public static System.Numerics.Vector<ulong> Compute64BitAddresses(System.Numerics.Vector<ulong> bases, System.Numerics.Vector<long> indices) { throw null; }
+ public static System.Numerics.Vector<ulong> Compute64BitAddresses(System.Numerics.Vector<ulong> bases, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static System.Numerics.Vector<uint> Compute8BitAddresses(System.Numerics.Vector<uint> bases, System.Numerics.Vector<int> indices) { throw null; }
+ public static System.Numerics.Vector<uint> Compute8BitAddresses(System.Numerics.Vector<uint> bases, System.Numerics.Vector<uint> indices) { throw null; }
+ public static System.Numerics.Vector<ulong> Compute8BitAddresses(System.Numerics.Vector<ulong> bases, System.Numerics.Vector<long> indices) { throw null; }
+ public static System.Numerics.Vector<ulong> Compute8BitAddresses(System.Numerics.Vector<ulong> bases, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static System.Numerics.Vector<sbyte> ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<sbyte> mask, System.Numerics.Vector<sbyte> defaultValue, System.Numerics.Vector<sbyte> data) { throw null; }
+ public static sbyte ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<sbyte> mask, sbyte defaultValues, System.Numerics.Vector<sbyte> data) { throw null; }
+ public static System.Numerics.Vector<short> ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<short> mask, System.Numerics.Vector<short> defaultValue, System.Numerics.Vector<short> data) { throw null; }
+ public static short ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<short> mask, short defaultValues, System.Numerics.Vector<short> data) { throw null; }
+ public static System.Numerics.Vector<int> ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<int> mask, System.Numerics.Vector<int> defaultValue, System.Numerics.Vector<int> data) { throw null; }
+ public static int ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<int> mask, int defaultValues, System.Numerics.Vector<int> data) { throw null; }
+ public static System.Numerics.Vector<long> ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<long> mask, System.Numerics.Vector<long> defaultValue, System.Numerics.Vector<long> data) { throw null; }
+ public static long ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<long> mask, long defaultValues, System.Numerics.Vector<long> data) { throw null; }
+ public static System.Numerics.Vector<byte> ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<byte> mask, System.Numerics.Vector<byte> defaultValue, System.Numerics.Vector<byte> data) { throw null; }
+ public static byte ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<byte> mask, byte defaultValues, System.Numerics.Vector<byte> data) { throw null; }
+ public static System.Numerics.Vector<ushort> ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<ushort> mask, System.Numerics.Vector<ushort> defaultValue, System.Numerics.Vector<ushort> data) { throw null; }
+ public static ushort ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<ushort> mask, ushort defaultValues, System.Numerics.Vector<ushort> data) { throw null; }
+ public static System.Numerics.Vector<uint> ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<uint> mask, System.Numerics.Vector<uint> defaultValue, System.Numerics.Vector<uint> data) { throw null; }
+ public static uint ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<uint> mask, uint defaultValues, System.Numerics.Vector<uint> data) { throw null; }
+ public static System.Numerics.Vector<ulong> ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<ulong> defaultValue, System.Numerics.Vector<ulong> data) { throw null; }
+ public static ulong ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<ulong> mask, ulong defaultValues, System.Numerics.Vector<ulong> data) { throw null; }
+ public static System.Numerics.Vector<float> ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<float> mask, System.Numerics.Vector<float> defaultValue, System.Numerics.Vector<float> data) { throw null; }
+ public static float ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<float> mask, float defaultValues, System.Numerics.Vector<float> data) { throw null; }
+ public static System.Numerics.Vector<double> ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<double> mask, System.Numerics.Vector<double> defaultValue, System.Numerics.Vector<double> data) { throw null; }
+ public static double ConditionalExtractAfterLastActiveElement(System.Numerics.Vector<double> mask, double defaultValues, System.Numerics.Vector<double> data) { throw null; }
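// Sketch (editorial, same assumptions as above): these follow SVE's CLASTA behaviour --
// extract the element after the last active lane of `mask` in `data`, or return the
// fallback when no lane is active:
//
//     int next = Sve.ConditionalExtractAfterLastActiveElement(mask, -1, data); // -1 if mask is all-false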
+ public static System.Numerics.Vector<sbyte> ConditionalExtractAfterLastActiveElementAndReplicate(System.Numerics.Vector<sbyte> mask, System.Numerics.Vector<sbyte> defaultScalar, System.Numerics.Vector<sbyte> data) { throw null; }
+ public static System.Numerics.Vector<short> ConditionalExtractAfterLastActiveElementAndReplicate(System.Numerics.Vector<short> mask, System.Numerics.Vector<short> defaultScalar, System.Numerics.Vector<short> data) { throw null; }
+ public static System.Numerics.Vector<int> ConditionalExtractAfterLastActiveElementAndReplicate(System.Numerics.Vector<int> mask, System.Numerics.Vector<int> defaultScalar, System.Numerics.Vector<int> data) { throw null; }
+ public static System.Numerics.Vector<long> ConditionalExtractAfterLastActiveElementAndReplicate(System.Numerics.Vector<long> mask, System.Numerics.Vector<long> defaultScalar, System.Numerics.Vector<long> data) { throw null; }
+ public static System.Numerics.Vector<byte> ConditionalExtractAfterLastActiveElementAndReplicate(System.Numerics.Vector<byte> mask, System.Numerics.Vector<byte> defaultScalar, System.Numerics.Vector<byte> data) { throw null; }
+ public static System.Numerics.Vector<ushort> ConditionalExtractAfterLastActiveElementAndReplicate(System.Numerics.Vector<ushort> mask, System.Numerics.Vector<ushort> defaultScalar, System.Numerics.Vector<ushort> data) { throw null; }
+ public static System.Numerics.Vector<uint> ConditionalExtractAfterLastActiveElementAndReplicate(System.Numerics.Vector<uint> mask, System.Numerics.Vector<uint> defaultScalar, System.Numerics.Vector<uint> data) { throw null; }
+ public static System.Numerics.Vector<ulong> ConditionalExtractAfterLastActiveElementAndReplicate(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<ulong> defaultScalar, System.Numerics.Vector<ulong> data) { throw null; }
+ public static System.Numerics.Vector<float> ConditionalExtractAfterLastActiveElementAndReplicate(System.Numerics.Vector<float> mask, System.Numerics.Vector<float> defaultScalar, System.Numerics.Vector<float> data) { throw null; }
+ public static System.Numerics.Vector<double> ConditionalExtractAfterLastActiveElementAndReplicate(System.Numerics.Vector<double> mask, System.Numerics.Vector<double> defaultScalar, System.Numerics.Vector<double> data) { throw null; }
+ public static System.Numerics.Vector<sbyte> ConditionalExtractLastActiveElement(System.Numerics.Vector<sbyte> mask, System.Numerics.Vector<sbyte> defaultValue, System.Numerics.Vector<sbyte> data) { throw null; }
+ public static sbyte ConditionalExtractLastActiveElement(System.Numerics.Vector<sbyte> mask, sbyte defaultValues, System.Numerics.Vector<sbyte> data) { throw null; }
+ public static System.Numerics.Vector<short> ConditionalExtractLastActiveElement(System.Numerics.Vector<short> mask, System.Numerics.Vector<short> defaultValue, System.Numerics.Vector<short> data) { throw null; }
+ public static short ConditionalExtractLastActiveElement(System.Numerics.Vector<short> mask, short defaultValues, System.Numerics.Vector<short> data) { throw null; }
+ public static System.Numerics.Vector<int> ConditionalExtractLastActiveElement(System.Numerics.Vector<int> mask, System.Numerics.Vector<int> defaultValue, System.Numerics.Vector<int> data) { throw null; }
+ public static int ConditionalExtractLastActiveElement(System.Numerics.Vector<int> mask, int defaultValues, System.Numerics.Vector<int> data) { throw null; }
+ public static System.Numerics.Vector<long> ConditionalExtractLastActiveElement(System.Numerics.Vector<long> mask, System.Numerics.Vector<long> defaultValue, System.Numerics.Vector<long> data) { throw null; }
+ public static long ConditionalExtractLastActiveElement(System.Numerics.Vector<long> mask, long defaultValues, System.Numerics.Vector<long> data) { throw null; }
+ public static System.Numerics.Vector<byte> ConditionalExtractLastActiveElement(System.Numerics.Vector<byte> mask, System.Numerics.Vector<byte> defaultValue, System.Numerics.Vector<byte> data) { throw null; }
+ public static byte ConditionalExtractLastActiveElement(System.Numerics.Vector<byte> mask, byte defaultValues, System.Numerics.Vector<byte> data) { throw null; }
+ public static System.Numerics.Vector<ushort> ConditionalExtractLastActiveElement(System.Numerics.Vector<ushort> mask, System.Numerics.Vector<ushort> defaultValue, System.Numerics.Vector<ushort> data) { throw null; }
+ public static ushort ConditionalExtractLastActiveElement(System.Numerics.Vector<ushort> mask, ushort defaultValues, System.Numerics.Vector<ushort> data) { throw null; }
+ public static System.Numerics.Vector<uint> ConditionalExtractLastActiveElement(System.Numerics.Vector<uint> mask, System.Numerics.Vector<uint> defaultValue, System.Numerics.Vector<uint> data) { throw null; }
+ public static uint ConditionalExtractLastActiveElement(System.Numerics.Vector<uint> mask, uint defaultValues, System.Numerics.Vector<uint> data) { throw null; }
+ public static System.Numerics.Vector<ulong> ConditionalExtractLastActiveElement(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<ulong> defaultValue, System.Numerics.Vector<ulong> data) { throw null; }
+ public static ulong ConditionalExtractLastActiveElement(System.Numerics.Vector<ulong> mask, ulong defaultValues, System.Numerics.Vector<ulong> data) { throw null; }
+ public static System.Numerics.Vector<float> ConditionalExtractLastActiveElement(System.Numerics.Vector<float> mask, System.Numerics.Vector<float> defaultValue, System.Numerics.Vector<float> data) { throw null; }
+ public static float ConditionalExtractLastActiveElement(System.Numerics.Vector<float> mask, float defaultValues, System.Numerics.Vector<float> data) { throw null; }
+ public static System.Numerics.Vector<double> ConditionalExtractLastActiveElement(System.Numerics.Vector<double> mask, System.Numerics.Vector<double> defaultValue, System.Numerics.Vector<double> data) { throw null; }
+ public static double ConditionalExtractLastActiveElement(System.Numerics.Vector<double> mask, double defaultValues, System.Numerics.Vector<double> data) { throw null; }
+ public static System.Numerics.Vector<sbyte> ConditionalExtractLastActiveElementAndReplicate(System.Numerics.Vector<sbyte> mask, System.Numerics.Vector<sbyte> fallback, System.Numerics.Vector<sbyte> data) { throw null; }
+ public static System.Numerics.Vector<short> ConditionalExtractLastActiveElementAndReplicate(System.Numerics.Vector<short> mask, System.Numerics.Vector<short> fallback, System.Numerics.Vector<short> data) { throw null; }
+ public static System.Numerics.Vector<int> ConditionalExtractLastActiveElementAndReplicate(System.Numerics.Vector<int> mask, System.Numerics.Vector<int> fallback, System.Numerics.Vector<int> data) { throw null; }
+ public static System.Numerics.Vector<long> ConditionalExtractLastActiveElementAndReplicate(System.Numerics.Vector<long> mask, System.Numerics.Vector<long> fallback, System.Numerics.Vector<long> data) { throw null; }
+ public static System.Numerics.Vector<byte> ConditionalExtractLastActiveElementAndReplicate(System.Numerics.Vector<byte> mask, System.Numerics.Vector<byte> fallback, System.Numerics.Vector<byte> data) { throw null; }
+ public static System.Numerics.Vector<ushort> ConditionalExtractLastActiveElementAndReplicate(System.Numerics.Vector<ushort> mask, System.Numerics.Vector<ushort> fallback, System.Numerics.Vector<ushort> data) { throw null; }
+ public static System.Numerics.Vector<uint> ConditionalExtractLastActiveElementAndReplicate(System.Numerics.Vector<uint> mask, System.Numerics.Vector<uint> fallback, System.Numerics.Vector<uint> data) { throw null; }
+ public static System.Numerics.Vector<ulong> ConditionalExtractLastActiveElementAndReplicate(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<ulong> fallback, System.Numerics.Vector<ulong> data) { throw null; }
+ public static System.Numerics.Vector<float> ConditionalExtractLastActiveElementAndReplicate(System.Numerics.Vector<float> mask, System.Numerics.Vector<float> fallback, System.Numerics.Vector<float> data) { throw null; }
+ public static System.Numerics.Vector<double> ConditionalExtractLastActiveElementAndReplicate(System.Numerics.Vector<double> mask, System.Numerics.Vector<double> fallback, System.Numerics.Vector<double> data) { throw null; }
+ public static System.Numerics.Vector<sbyte> ConditionalSelect(System.Numerics.Vector<sbyte> mask, System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> ConditionalSelect(System.Numerics.Vector<short> mask, System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> ConditionalSelect(System.Numerics.Vector<int> mask, System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> ConditionalSelect(System.Numerics.Vector<long> mask, System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> ConditionalSelect(System.Numerics.Vector<byte> mask, System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> ConditionalSelect(System.Numerics.Vector<ushort> mask, System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> ConditionalSelect(System.Numerics.Vector<uint> mask, System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> ConditionalSelect(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> ConditionalSelect(System.Numerics.Vector<float> mask, System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> ConditionalSelect(System.Numerics.Vector<double> mask, System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
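// Sketch (editorial, same assumptions): ConditionalSelect is the SVE SEL blend --
// for each lane, take `left` where the mask is set, otherwise `right`:
//
//     Vector<float> blended = Sve.ConditionalSelect(mask, left, right); // mask ? left : right, per lane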
+ public static System.Numerics.Vector<double> ConvertToDouble(System.Numerics.Vector<int> value) { throw null; }
+ public static System.Numerics.Vector<double> ConvertToDouble(System.Numerics.Vector<long> value) { throw null; }
+ public static System.Numerics.Vector<double> ConvertToDouble(System.Numerics.Vector<uint> value) { throw null; }
+ public static System.Numerics.Vector<double> ConvertToDouble(System.Numerics.Vector<ulong> value) { throw null; }
+ public static System.Numerics.Vector<double> ConvertToDouble(System.Numerics.Vector<float> value) { throw null; }
+ public static System.Numerics.Vector<int> ConvertToInt32(System.Numerics.Vector<float> value) { throw null; }
+ public static System.Numerics.Vector<int> ConvertToInt32(System.Numerics.Vector<double> value) { throw null; }
+ public static System.Numerics.Vector<long> ConvertToInt64(System.Numerics.Vector<float> value) { throw null; }
+ public static System.Numerics.Vector<long> ConvertToInt64(System.Numerics.Vector<double> value) { throw null; }
+ public static System.Numerics.Vector<float> ConvertToSingle(System.Numerics.Vector<int> value) { throw null; }
+ public static System.Numerics.Vector<float> ConvertToSingle(System.Numerics.Vector<long> value) { throw null; }
+ public static System.Numerics.Vector<float> ConvertToSingle(System.Numerics.Vector<uint> value) { throw null; }
+ public static System.Numerics.Vector<float> ConvertToSingle(System.Numerics.Vector<ulong> value) { throw null; }
+ public static System.Numerics.Vector<float> ConvertToSingle(System.Numerics.Vector<double> value) { throw null; }
+ public static System.Numerics.Vector<uint> ConvertToUInt32(System.Numerics.Vector<float> value) { throw null; }
+ public static System.Numerics.Vector<uint> ConvertToUInt32(System.Numerics.Vector<double> value) { throw null; }
+ public static System.Numerics.Vector<ulong> ConvertToUInt64(System.Numerics.Vector<float> value) { throw null; }
+ public static System.Numerics.Vector<ulong> ConvertToUInt64(System.Numerics.Vector<double> value) { throw null; }
+ public static ulong Count16BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; }
+ public static ulong Count32BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; }
+ public static ulong Count64BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; }
+ public static ulong Count8BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; }
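// Sketch (editorial, same assumptions): conversions are lane-wise, and the Count*
// helpers report the vector-length-dependent lane count for an element size:
//
//     Vector<float> f = Sve.ConvertToSingle(ints);   // int -> float, per lane
//     ulong lanes = Sve.Count32BitElements();        // 32-bit lanes per vector at the running VL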
+ public static System.Numerics.Vector<sbyte> CreateBreakAfterMask(System.Numerics.Vector<sbyte> totalMask, System.Numerics.Vector<sbyte> fromMask) { throw null; }
+ public static System.Numerics.Vector<short> CreateBreakAfterMask(System.Numerics.Vector<short> totalMask, System.Numerics.Vector<short> fromMask) { throw null; }
+ public static System.Numerics.Vector<int> CreateBreakAfterMask(System.Numerics.Vector<int> totalMask, System.Numerics.Vector<int> fromMask) { throw null; }
+ public static System.Numerics.Vector<long> CreateBreakAfterMask(System.Numerics.Vector<long> totalMask, System.Numerics.Vector<long> fromMask) { throw null; }
+ public static System.Numerics.Vector<byte> CreateBreakAfterMask(System.Numerics.Vector<byte> totalMask, System.Numerics.Vector<byte> fromMask) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateBreakAfterMask(System.Numerics.Vector<ushort> totalMask, System.Numerics.Vector<ushort> fromMask) { throw null; }
+ public static System.Numerics.Vector<uint> CreateBreakAfterMask(System.Numerics.Vector<uint> totalMask, System.Numerics.Vector<uint> fromMask) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateBreakAfterMask(System.Numerics.Vector<ulong> totalMask, System.Numerics.Vector<ulong> fromMask) { throw null; }
+ public static System.Numerics.Vector<sbyte> CreateBreakAfterPropagateMask(System.Numerics.Vector<sbyte> mask, System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> CreateBreakAfterPropagateMask(System.Numerics.Vector<short> mask, System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> CreateBreakAfterPropagateMask(System.Numerics.Vector<int> mask, System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> CreateBreakAfterPropagateMask(System.Numerics.Vector<long> mask, System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> CreateBreakAfterPropagateMask(System.Numerics.Vector<byte> mask, System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateBreakAfterPropagateMask(System.Numerics.Vector<ushort> mask, System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> CreateBreakAfterPropagateMask(System.Numerics.Vector<uint> mask, System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateBreakAfterPropagateMask(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> CreateBreakBeforeMask(System.Numerics.Vector<sbyte> totalMask, System.Numerics.Vector<sbyte> fromMask) { throw null; }
+ public static System.Numerics.Vector<short> CreateBreakBeforeMask(System.Numerics.Vector<short> totalMask, System.Numerics.Vector<short> fromMask) { throw null; }
+ public static System.Numerics.Vector<int> CreateBreakBeforeMask(System.Numerics.Vector<int> totalMask, System.Numerics.Vector<int> fromMask) { throw null; }
+ public static System.Numerics.Vector<long> CreateBreakBeforeMask(System.Numerics.Vector<long> totalMask, System.Numerics.Vector<long> fromMask) { throw null; }
+ public static System.Numerics.Vector<byte> CreateBreakBeforeMask(System.Numerics.Vector<byte> totalMask, System.Numerics.Vector<byte> fromMask) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateBreakBeforeMask(System.Numerics.Vector<ushort> totalMask, System.Numerics.Vector<ushort> fromMask) { throw null; }
+ public static System.Numerics.Vector<uint> CreateBreakBeforeMask(System.Numerics.Vector<uint> totalMask, System.Numerics.Vector<uint> fromMask) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateBreakBeforeMask(System.Numerics.Vector<ulong> totalMask, System.Numerics.Vector<ulong> fromMask) { throw null; }
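// Sketch (editorial; semantics stated per the underlying BRKA/BRKB instructions, which
// these appear to wrap): CreateBreakAfterMask activates the lanes of totalMask up to and
// including the first active lane of fromMask; the Before variant stops just before it.
//
//     Vector<int> upToMatch = Sve.CreateBreakAfterMask(all, firstMatch);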
+ public static System.Numerics.Vector<sbyte> CreateBreakBeforePropagateMask(System.Numerics.Vector<sbyte> mask, System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> CreateBreakBeforePropagateMask(System.Numerics.Vector<short> mask, System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> CreateBreakBeforePropagateMask(System.Numerics.Vector<int> mask, System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> CreateBreakBeforePropagateMask(System.Numerics.Vector<long> mask, System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> CreateBreakBeforePropagateMask(System.Numerics.Vector<byte> mask, System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateBreakBeforePropagateMask(System.Numerics.Vector<ushort> mask, System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> CreateBreakBeforePropagateMask(System.Numerics.Vector<uint> mask, System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateBreakBeforePropagateMask(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> CreateBreakPropagateMask(System.Numerics.Vector<sbyte> totalMask, System.Numerics.Vector<sbyte> fromMask) { throw null; }
+ public static System.Numerics.Vector<short> CreateBreakPropagateMask(System.Numerics.Vector<short> totalMask, System.Numerics.Vector<short> fromMask) { throw null; }
+ public static System.Numerics.Vector<int> CreateBreakPropagateMask(System.Numerics.Vector<int> totalMask, System.Numerics.Vector<int> fromMask) { throw null; }
+ public static System.Numerics.Vector<long> CreateBreakPropagateMask(System.Numerics.Vector<long> totalMask, System.Numerics.Vector<long> fromMask) { throw null; }
+ public static System.Numerics.Vector<byte> CreateBreakPropagateMask(System.Numerics.Vector<byte> totalMask, System.Numerics.Vector<byte> fromMask) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateBreakPropagateMask(System.Numerics.Vector<ushort> totalMask, System.Numerics.Vector<ushort> fromMask) { throw null; }
+ public static System.Numerics.Vector<uint> CreateBreakPropagateMask(System.Numerics.Vector<uint> totalMask, System.Numerics.Vector<uint> fromMask) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateBreakPropagateMask(System.Numerics.Vector<ulong> totalMask, System.Numerics.Vector<ulong> fromMask) { throw null; }
+ public static System.Numerics.Vector<byte> CreateFalseMaskByte() { throw null; }
+ public static System.Numerics.Vector<double> CreateFalseMaskDouble() { throw null; }
+ public static System.Numerics.Vector<short> CreateFalseMaskInt16() { throw null; }
+ public static System.Numerics.Vector<int> CreateFalseMaskInt32() { throw null; }
+ public static System.Numerics.Vector<long> CreateFalseMaskInt64() { throw null; }
+ public static System.Numerics.Vector<sbyte> CreateFalseMaskSByte() { throw null; }
+ public static System.Numerics.Vector<float> CreateFalseMaskSingle() { throw null; }
+ public static System.Numerics.Vector<ushort> CreateFalseMaskUInt16() { throw null; }
+ public static System.Numerics.Vector<uint> CreateFalseMaskUInt32() { throw null; }
+ public static System.Numerics.Vector<ulong> CreateFalseMaskUInt64() { throw null; }
+ public static System.Numerics.Vector<sbyte> CreateMaskForFirstActiveElement(System.Numerics.Vector<sbyte> totalMask, System.Numerics.Vector<sbyte> fromMask) { throw null; }
+ public static System.Numerics.Vector<short> CreateMaskForFirstActiveElement(System.Numerics.Vector<short> totalMask, System.Numerics.Vector<short> fromMask) { throw null; }
+ public static System.Numerics.Vector<int> CreateMaskForFirstActiveElement(System.Numerics.Vector<int> totalMask, System.Numerics.Vector<int> fromMask) { throw null; }
+ public static System.Numerics.Vector<long> CreateMaskForFirstActiveElement(System.Numerics.Vector<long> totalMask, System.Numerics.Vector<long> fromMask) { throw null; }
+ public static System.Numerics.Vector<byte> CreateMaskForFirstActiveElement(System.Numerics.Vector<byte> totalMask, System.Numerics.Vector<byte> fromMask) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateMaskForFirstActiveElement(System.Numerics.Vector<ushort> totalMask, System.Numerics.Vector<ushort> fromMask) { throw null; }
+ public static System.Numerics.Vector<uint> CreateMaskForFirstActiveElement(System.Numerics.Vector<uint> totalMask, System.Numerics.Vector<uint> fromMask) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateMaskForFirstActiveElement(System.Numerics.Vector<ulong> totalMask, System.Numerics.Vector<ulong> fromMask) { throw null; }
+ public static System.Numerics.Vector<byte> CreateMaskForNextActiveElement(System.Numerics.Vector<byte> totalMask, System.Numerics.Vector<byte> fromMask) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateMaskForNextActiveElement(System.Numerics.Vector<ushort> totalMask, System.Numerics.Vector<ushort> fromMask) { throw null; }
+ public static System.Numerics.Vector<uint> CreateMaskForNextActiveElement(System.Numerics.Vector<uint> totalMask, System.Numerics.Vector<uint> fromMask) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateMaskForNextActiveElement(System.Numerics.Vector<ulong> totalMask, System.Numerics.Vector<ulong> fromMask) { throw null; }
+ public static System.Numerics.Vector<byte> CreateTrueMaskByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; }
+ public static System.Numerics.Vector<double> CreateTrueMaskDouble([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; }
+ public static System.Numerics.Vector<short> CreateTrueMaskInt16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; }
+ public static System.Numerics.Vector<int> CreateTrueMaskInt32([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; }
+ public static System.Numerics.Vector<long> CreateTrueMaskInt64([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; }
+ public static System.Numerics.Vector<sbyte> CreateTrueMaskSByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; }
+ public static System.Numerics.Vector<float> CreateTrueMaskSingle([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateTrueMaskUInt16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; }
+ public static System.Numerics.Vector<uint> CreateTrueMaskUInt32([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateTrueMaskUInt64([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; }
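// Sketch (editorial, same assumptions): the mask constructors give the all-active and
// all-inactive predicates most governed operations start from:
//
//     Vector<int> all = Sve.CreateTrueMaskInt32();    // every 32-bit lane active (pattern All)
//     Vector<int> none = Sve.CreateFalseMaskInt32();  // no lanes active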
+ public static System.Numerics.Vector<ushort> CreateWhileLessThanMask16Bit(int left, int right) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateWhileLessThanMask16Bit(long left, long right) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateWhileLessThanMask16Bit(uint left, uint right) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateWhileLessThanMask16Bit(ulong left, ulong right) { throw null; }
+ public static System.Numerics.Vector<uint> CreateWhileLessThanMask32Bit(int left, int right) { throw null; }
+ public static System.Numerics.Vector<uint> CreateWhileLessThanMask32Bit(long left, long right) { throw null; }
+ public static System.Numerics.Vector<uint> CreateWhileLessThanMask32Bit(uint left, uint right) { throw null; }
+ public static System.Numerics.Vector<uint> CreateWhileLessThanMask32Bit(ulong left, ulong right) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateWhileLessThanMask64Bit(int left, int right) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateWhileLessThanMask64Bit(long left, long right) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateWhileLessThanMask64Bit(uint left, uint right) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateWhileLessThanMask64Bit(ulong left, ulong right) { throw null; }
+ public static System.Numerics.Vector<byte> CreateWhileLessThanMask8Bit(int left, int right) { throw null; }
+ public static System.Numerics.Vector<byte> CreateWhileLessThanMask8Bit(long left, long right) { throw null; }
+ public static System.Numerics.Vector<byte> CreateWhileLessThanMask8Bit(uint left, uint right) { throw null; }
+ public static System.Numerics.Vector<byte> CreateWhileLessThanMask8Bit(ulong left, ulong right) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateWhileLessThanOrEqualMask16Bit(int left, int right) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateWhileLessThanOrEqualMask16Bit(long left, long right) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateWhileLessThanOrEqualMask16Bit(uint left, uint right) { throw null; }
+ public static System.Numerics.Vector<ushort> CreateWhileLessThanOrEqualMask16Bit(ulong left, ulong right) { throw null; }
+ public static System.Numerics.Vector<uint> CreateWhileLessThanOrEqualMask32Bit(int left, int right) { throw null; }
+ public static System.Numerics.Vector<uint> CreateWhileLessThanOrEqualMask32Bit(long left, long right) { throw null; }
+ public static System.Numerics.Vector<uint> CreateWhileLessThanOrEqualMask32Bit(uint left, uint right) { throw null; }
+ public static System.Numerics.Vector<uint> CreateWhileLessThanOrEqualMask32Bit(ulong left, ulong right) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateWhileLessThanOrEqualMask64Bit(int left, int right) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateWhileLessThanOrEqualMask64Bit(long left, long right) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateWhileLessThanOrEqualMask64Bit(uint left, uint right) { throw null; }
+ public static System.Numerics.Vector<ulong> CreateWhileLessThanOrEqualMask64Bit(ulong left, ulong right) { throw null; }
+ public static System.Numerics.Vector<byte> CreateWhileLessThanOrEqualMask8Bit(int left, int right) { throw null; }
+ public static System.Numerics.Vector<byte> CreateWhileLessThanOrEqualMask8Bit(long left, long right) { throw null; }
+ public static System.Numerics.Vector<byte> CreateWhileLessThanOrEqualMask8Bit(uint left, uint right) { throw null; }
+ public static System.Numerics.Vector<byte> CreateWhileLessThanOrEqualMask8Bit(ulong left, ulong right) { throw null; }
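// Sketch (editorial, same assumptions; `src`, `dst`, `n` are hypothetical, and LoadVector /
// Store are declared elsewhere in this surface): the while-less-than masks are the standard
// SVE idiom for predicated loops with no scalar remainder:
//
//     for (int i = 0; i < n; i += (int)Sve.Count32BitElements())
//     {
//         Vector<uint> pred = Sve.CreateWhileLessThanMask32Bit(i, n); // active while i + lane < n
//         Vector<uint> v = Sve.LoadVector(pred, src + i);             // masked load
//         Sve.Store(pred, dst + i, v);                                // masked store
//     }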
+ public static System.Numerics.Vector<int> Divide(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> Divide(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<uint> Divide(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> Divide(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> Divide(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> Divide(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<int> DotProduct(System.Numerics.Vector<int> addend, System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<long> DotProduct(System.Numerics.Vector<long> addend, System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<uint> DotProduct(System.Numerics.Vector<uint> addend, System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ulong> DotProduct(System.Numerics.Vector<ulong> addend, System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<int> DotProductBySelectedScalar(System.Numerics.Vector<int> addend, System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right, [ConstantExpected] byte rightIndex) { throw null; }
+ public static System.Numerics.Vector<long> DotProductBySelectedScalar(System.Numerics.Vector<long> addend, System.Numerics.Vector<short> left, System.Numerics.Vector<short> right, [ConstantExpected] byte rightIndex) { throw null; }
+ public static System.Numerics.Vector<uint> DotProductBySelectedScalar(System.Numerics.Vector<uint> addend, System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right, [ConstantExpected] byte rightIndex) { throw null; }
+ public static System.Numerics.Vector<ulong> DotProductBySelectedScalar(System.Numerics.Vector<ulong> addend, System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right, [ConstantExpected] byte rightIndex) { throw null; }
+ public static System.Numerics.Vector<sbyte> DuplicateSelectedScalarToVector(System.Numerics.Vector<sbyte> data, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<short> DuplicateSelectedScalarToVector(System.Numerics.Vector<short> data, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<int> DuplicateSelectedScalarToVector(System.Numerics.Vector<int> data, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<long> DuplicateSelectedScalarToVector(System.Numerics.Vector<long> data, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<byte> DuplicateSelectedScalarToVector(System.Numerics.Vector<byte> data, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<ushort> DuplicateSelectedScalarToVector(System.Numerics.Vector<ushort> data, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<uint> DuplicateSelectedScalarToVector(System.Numerics.Vector<uint> data, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<ulong> DuplicateSelectedScalarToVector(System.Numerics.Vector<ulong> data, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<float> DuplicateSelectedScalarToVector(System.Numerics.Vector<float> data, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<double> DuplicateSelectedScalarToVector(System.Numerics.Vector<double> data, [ConstantExpected] byte index) { throw null; }
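// Sketch (editorial, same assumptions): DotProduct is the widening SDOT/UDOT form --
// four narrow products accumulate into each wide lane of the addend:
//
//     Vector<int> acc = Sve.DotProduct(addend, bytesA, bytesB); // acc[i] += sum of 4 sbyte products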
+ public static sbyte ExtractAfterLastScalar(System.Numerics.Vector<sbyte> value) { throw null; }
+ public static short ExtractAfterLastScalar(System.Numerics.Vector<short> value) { throw null; }
+ public static int ExtractAfterLastScalar(System.Numerics.Vector<int> value) { throw null; }
+ public static long ExtractAfterLastScalar(System.Numerics.Vector<long> value) { throw null; }
+ public static byte ExtractAfterLastScalar(System.Numerics.Vector<byte> value) { throw null; }
+ public static ushort ExtractAfterLastScalar(System.Numerics.Vector<ushort> value) { throw null; }
+ public static uint ExtractAfterLastScalar(System.Numerics.Vector<uint> value) { throw null; }
+ public static ulong ExtractAfterLastScalar(System.Numerics.Vector<ulong> value) { throw null; }
+ public static float ExtractAfterLastScalar(System.Numerics.Vector<float> value) { throw null; }
+ public static double ExtractAfterLastScalar(System.Numerics.Vector<double> value) { throw null; }
+ public static System.Numerics.Vector<sbyte> ExtractAfterLastVector(System.Numerics.Vector<sbyte> value) { throw null; }
+ public static System.Numerics.Vector<short> ExtractAfterLastVector(System.Numerics.Vector<short> value) { throw null; }
+ public static System.Numerics.Vector<int> ExtractAfterLastVector(System.Numerics.Vector<int> value) { throw null; }
+ public static System.Numerics.Vector<long> ExtractAfterLastVector(System.Numerics.Vector<long> value) { throw null; }
+ public static System.Numerics.Vector<byte> ExtractAfterLastVector(System.Numerics.Vector<byte> value) { throw null; }
+ public static System.Numerics.Vector<ushort> ExtractAfterLastVector(System.Numerics.Vector<ushort> value) { throw null; }
+ public static System.Numerics.Vector<uint> ExtractAfterLastVector(System.Numerics.Vector<uint> value) { throw null; }
+ public static System.Numerics.Vector<ulong> ExtractAfterLastVector(System.Numerics.Vector<ulong> value) { throw null; }
+ public static System.Numerics.Vector<float> ExtractAfterLastVector(System.Numerics.Vector<float> value) { throw null; }
+ public static System.Numerics.Vector<double> ExtractAfterLastVector(System.Numerics.Vector<double> value) { throw null; }
+ public static sbyte ExtractLastScalar(System.Numerics.Vector<sbyte> value) { throw null; }
+ public static short ExtractLastScalar(System.Numerics.Vector<short> value) { throw null; }
+ public static int ExtractLastScalar(System.Numerics.Vector<int> value) { throw null; }
+ public static long ExtractLastScalar(System.Numerics.Vector<long> value) { throw null; }
+ public static byte ExtractLastScalar(System.Numerics.Vector<byte> value) { throw null; }
+ public static ushort ExtractLastScalar(System.Numerics.Vector<ushort> value) { throw null; }
+ public static uint ExtractLastScalar(System.Numerics.Vector<uint> value) { throw null; }
+ public static ulong ExtractLastScalar(System.Numerics.Vector<ulong> value) { throw null; }
+ public static float ExtractLastScalar(System.Numerics.Vector<float> value) { throw null; }
+ public static double ExtractLastScalar(System.Numerics.Vector<double> value) { throw null; }
+ public static System.Numerics.Vector<sbyte> ExtractLastVector(System.Numerics.Vector<sbyte> value) { throw null; }
+ public static System.Numerics.Vector<short> ExtractLastVector(System.Numerics.Vector<short> value) { throw null; }
+ public static System.Numerics.Vector<int> ExtractLastVector(System.Numerics.Vector<int> value) { throw null; }
+ public static System.Numerics.Vector<long> ExtractLastVector(System.Numerics.Vector<long> value) { throw null; }
+ public static System.Numerics.Vector<byte> ExtractLastVector(System.Numerics.Vector<byte> value) { throw null; }
+ public static System.Numerics.Vector<ushort> ExtractLastVector(System.Numerics.Vector<ushort> value) { throw null; }
+ public static System.Numerics.Vector<uint> ExtractLastVector(System.Numerics.Vector<uint> value) { throw null; }
+ public static System.Numerics.Vector<ulong> ExtractLastVector(System.Numerics.Vector<ulong> value) { throw null; }
+ public static System.Numerics.Vector<float> ExtractLastVector(System.Numerics.Vector<float> value) { throw null; }
+ public static System.Numerics.Vector<double> ExtractLastVector(System.Numerics.Vector<double> value) { throw null; }
+ public static System.Numerics.Vector<sbyte> ExtractVector(System.Numerics.Vector<sbyte> upper, System.Numerics.Vector<sbyte> lower, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<short> ExtractVector(System.Numerics.Vector<short> upper, System.Numerics.Vector<short> lower, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<int> ExtractVector(System.Numerics.Vector<int> upper, System.Numerics.Vector<int> lower, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<long> ExtractVector(System.Numerics.Vector<long> upper, System.Numerics.Vector<long> lower, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<byte> ExtractVector(System.Numerics.Vector<byte> upper, System.Numerics.Vector<byte> lower, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<ushort> ExtractVector(System.Numerics.Vector<ushort> upper, System.Numerics.Vector<ushort> lower, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<uint> ExtractVector(System.Numerics.Vector<uint> upper, System.Numerics.Vector<uint> lower, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<ulong> ExtractVector(System.Numerics.Vector<ulong> upper, System.Numerics.Vector<ulong> lower, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<float> ExtractVector(System.Numerics.Vector<float> upper, System.Numerics.Vector<float> lower, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<double> ExtractVector(System.Numerics.Vector<double> upper, System.Numerics.Vector<double> lower, [ConstantExpected] byte index) { throw null; }
+ public static System.Numerics.Vector<float> FloatingPointExponentialAccelerator(System.Numerics.Vector<uint> value) { throw null; }
+ public static System.Numerics.Vector<double> FloatingPointExponentialAccelerator(System.Numerics.Vector<ulong> value) { throw null; }
+ public static System.Numerics.Vector<float> FusedMultiplyAdd(System.Numerics.Vector<float> addend, System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> FusedMultiplyAdd(System.Numerics.Vector<double> addend, System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<float> FusedMultiplyAddBySelectedScalar(System.Numerics.Vector<float> addend, System.Numerics.Vector<float> left, System.Numerics.Vector<float> right, [ConstantExpected] byte rightIndex) { throw null; }
+ public static System.Numerics.Vector<double> FusedMultiplyAddBySelectedScalar(System.Numerics.Vector<double> addend, System.Numerics.Vector<double> left, System.Numerics.Vector<double> right, [ConstantExpected] byte rightIndex) { throw null; }
+ public static System.Numerics.Vector<float> FusedMultiplyAddNegated(System.Numerics.Vector<float> addend, System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> FusedMultiplyAddNegated(System.Numerics.Vector<double> addend, System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<float> FusedMultiplySubtract(System.Numerics.Vector<float> minuend, System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> FusedMultiplySubtract(System.Numerics.Vector<double> minuend, System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<float> FusedMultiplySubtractBySelectedScalar(System.Numerics.Vector<float> minuend, System.Numerics.Vector<float> left, System.Numerics.Vector<float> right, [ConstantExpected] byte rightIndex) { throw null; }
+ public static System.Numerics.Vector<double> FusedMultiplySubtractBySelectedScalar(System.Numerics.Vector<double> minuend, System.Numerics.Vector<double> left, System.Numerics.Vector<double> right, [ConstantExpected] byte rightIndex) { throw null; }
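// Sketch (editorial, same assumptions): the fused forms apply a single rounding to the
// whole multiply-accumulate, matching the FMLA-family instructions:
//
//     Vector<double> r = Sve.FusedMultiplyAdd(acc, x, y); // acc + x * y per lane, one rounding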
+ public static System.Numerics.Vector<float> FusedMultiplySubtractNegated(System.Numerics.Vector<float> minuend, System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> FusedMultiplySubtractNegated(System.Numerics.Vector<double> minuend, System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static unsafe void GatherPrefetch16Bit(System.Numerics.Vector<short> mask, void* address, System.Numerics.Vector<int> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch16Bit(System.Numerics.Vector<short> mask, void* address, System.Numerics.Vector<long> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch16Bit(System.Numerics.Vector<short> mask, System.Numerics.Vector<uint> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch16Bit(System.Numerics.Vector<short> mask, void* address, System.Numerics.Vector<uint> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch16Bit(System.Numerics.Vector<short> mask, System.Numerics.Vector<ulong> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch16Bit(System.Numerics.Vector<short> mask, void* address, System.Numerics.Vector<ulong> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch16Bit(System.Numerics.Vector<ushort> mask, void* address, System.Numerics.Vector<int> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch16Bit(System.Numerics.Vector<ushort> mask, void* address, System.Numerics.Vector<long> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch16Bit(System.Numerics.Vector<ushort> mask, System.Numerics.Vector<uint> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch16Bit(System.Numerics.Vector<ushort> mask, void* address, System.Numerics.Vector<uint> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch16Bit(System.Numerics.Vector<ushort> mask, System.Numerics.Vector<ulong> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch16Bit(System.Numerics.Vector<ushort> mask, void* address, System.Numerics.Vector<ulong> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch32Bit(System.Numerics.Vector<int> mask, void* address, System.Numerics.Vector<int> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch32Bit(System.Numerics.Vector<int> mask, void* address, System.Numerics.Vector<long> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch32Bit(System.Numerics.Vector<int> mask, System.Numerics.Vector<uint> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch32Bit(System.Numerics.Vector<int> mask, void* address, System.Numerics.Vector<uint> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch32Bit(System.Numerics.Vector<int> mask, System.Numerics.Vector<ulong> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch32Bit(System.Numerics.Vector<int> mask, void* address, System.Numerics.Vector<ulong> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch32Bit(System.Numerics.Vector<uint> mask, void* address, System.Numerics.Vector<int> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch32Bit(System.Numerics.Vector<uint> mask, void* address, System.Numerics.Vector<long> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch32Bit(System.Numerics.Vector<uint> mask, System.Numerics.Vector<uint> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch32Bit(System.Numerics.Vector<uint> mask, void* address, System.Numerics.Vector<uint> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch32Bit(System.Numerics.Vector<uint> mask, System.Numerics.Vector<ulong> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch32Bit(System.Numerics.Vector<uint> mask, void* address, System.Numerics.Vector<ulong> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch64Bit(System.Numerics.Vector<long> mask, void* address, System.Numerics.Vector<int> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch64Bit(System.Numerics.Vector<long> mask, void* address, System.Numerics.Vector<long> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch64Bit(System.Numerics.Vector<long> mask, System.Numerics.Vector<uint> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch64Bit(System.Numerics.Vector<long> mask, void* address, System.Numerics.Vector<uint> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch64Bit(System.Numerics.Vector<long> mask, System.Numerics.Vector<ulong> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch64Bit(System.Numerics.Vector<long> mask, void* address, System.Numerics.Vector<ulong> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch64Bit(System.Numerics.Vector<ulong> mask, void* address, System.Numerics.Vector<int> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch64Bit(System.Numerics.Vector<ulong> mask, void* address, System.Numerics.Vector<long> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch64Bit(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<uint> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch64Bit(System.Numerics.Vector<ulong> mask, void* address, System.Numerics.Vector<uint> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch64Bit(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<ulong> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch64Bit(System.Numerics.Vector<ulong> mask, void* address, System.Numerics.Vector<ulong> indices, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
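// Sketch (editorial, same assumptions; the SvePrefetchType member name is assumed):
// gather prefetches hint at scattered cache lines ahead of a real gather:
//
//     Sve.GatherPrefetch32Bit(mask, basePtr, indices, SvePrefetchType.LoadL1Temporal);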
+ public static unsafe void GatherPrefetch8Bit(System.Numerics.Vector<sbyte> mask, void* address, System.Numerics.Vector<int> offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch8Bit(System.Numerics.Vector<sbyte> mask, void* address, System.Numerics.Vector<long> offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch8Bit(System.Numerics.Vector<sbyte> mask, System.Numerics.Vector<uint> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch8Bit(System.Numerics.Vector<sbyte> mask, void* address, System.Numerics.Vector<uint> offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch8Bit(System.Numerics.Vector<sbyte> mask, System.Numerics.Vector<ulong> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch8Bit(System.Numerics.Vector<sbyte> mask, void* address, System.Numerics.Vector<ulong> offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch8Bit(System.Numerics.Vector<byte> mask, void* address, System.Numerics.Vector<int> offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch8Bit(System.Numerics.Vector<byte> mask, void* address, System.Numerics.Vector<long> offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch8Bit(System.Numerics.Vector<byte> mask, System.Numerics.Vector<uint> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch8Bit(System.Numerics.Vector<byte> mask, void* address, System.Numerics.Vector<uint> offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static void GatherPrefetch8Bit(System.Numerics.Vector<byte> mask, System.Numerics.Vector<ulong> addresses, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe void GatherPrefetch8Bit(System.Numerics.Vector<byte> mask, void* address, System.Numerics.Vector<ulong> offsets, [ConstantExpected] SvePrefetchType prefetchType) { throw null; }
+ public static unsafe System.Numerics.Vector<int> GatherVector(System.Numerics.Vector<int> mask, int* address, System.Numerics.Vector<int> indices) { throw null; }
+ public static System.Numerics.Vector<int> GatherVector(System.Numerics.Vector<int> mask, System.Numerics.Vector<uint> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<int> GatherVector(System.Numerics.Vector<int> mask, int* address, System.Numerics.Vector<uint> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<long> GatherVector(System.Numerics.Vector<long> mask, long* address, System.Numerics.Vector<long> indices) { throw null; }
+ public static System.Numerics.Vector<long> GatherVector(System.Numerics.Vector<long> mask, System.Numerics.Vector<ulong> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<long> GatherVector(System.Numerics.Vector<long> mask, long* address, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<uint> GatherVector(System.Numerics.Vector<uint> mask, uint* address, System.Numerics.Vector<int> indices) { throw null; }
+ public static System.Numerics.Vector<uint> GatherVector(System.Numerics.Vector<uint> mask, System.Numerics.Vector<uint> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<uint> GatherVector(System.Numerics.Vector<uint> mask, uint* address, System.Numerics.Vector<uint> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<ulong> GatherVector(System.Numerics.Vector<ulong> mask, ulong* address, System.Numerics.Vector<long> indices) { throw null; }
+ public static System.Numerics.Vector<ulong> GatherVector(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<ulong> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<ulong> GatherVector(System.Numerics.Vector<ulong> mask, ulong* address, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<float> GatherVector(System.Numerics.Vector<float> mask, float* address, System.Numerics.Vector<int> indices) { throw null; }
+ public static System.Numerics.Vector<float> GatherVector(System.Numerics.Vector<float> mask, System.Numerics.Vector<uint> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<float> GatherVector(System.Numerics.Vector<float> mask, float* address, System.Numerics.Vector<uint> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<double> GatherVector(System.Numerics.Vector<double> mask, double* address, System.Numerics.Vector<long> indices) { throw null; }
+ public static System.Numerics.Vector<double> GatherVector(System.Numerics.Vector<double> mask, System.Numerics.Vector<ulong> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<double> GatherVector(System.Numerics.Vector<double> mask, double* address, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<int> GatherVectorByteZeroExtend(System.Numerics.Vector<int> mask, byte* address, System.Numerics.Vector<int> indices) { throw null; }
+ public static System.Numerics.Vector<int> GatherVectorByteZeroExtend(System.Numerics.Vector<int> mask, System.Numerics.Vector<uint> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<int> GatherVectorByteZeroExtend(System.Numerics.Vector<int> mask, byte* address, System.Numerics.Vector<uint> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<long> GatherVectorByteZeroExtend(System.Numerics.Vector<long> mask, byte* address, System.Numerics.Vector<long> indices) { throw null; }
+ public static System.Numerics.Vector<long> GatherVectorByteZeroExtend(System.Numerics.Vector<long> mask, System.Numerics.Vector<ulong> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<long> GatherVectorByteZeroExtend(System.Numerics.Vector<long> mask, byte* address, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<uint> GatherVectorByteZeroExtend(System.Numerics.Vector<uint> mask, byte* address, System.Numerics.Vector<int> indices) { throw null; }
+ public static System.Numerics.Vector<uint> GatherVectorByteZeroExtend(System.Numerics.Vector<uint> mask, System.Numerics.Vector<uint> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<uint> GatherVectorByteZeroExtend(System.Numerics.Vector<uint> mask, byte* address, System.Numerics.Vector<uint> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<ulong> GatherVectorByteZeroExtend(System.Numerics.Vector<ulong> mask, byte* address, System.Numerics.Vector<long> indices) { throw null; }
+ public static System.Numerics.Vector<ulong> GatherVectorByteZeroExtend(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<ulong> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<ulong> GatherVectorByteZeroExtend(System.Numerics.Vector<ulong> mask, byte* address, System.Numerics.Vector<ulong> indices) { throw null; }
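// Sketch (editorial, same assumptions; `table` and `indices` are illustrative):
//
//     fixed (int* p = table)
//     {
//         Vector<int> g = Sve.GatherVector(mask, p, indices); // g[i] = p[indices[i]] for active lanes
//     }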
+ public static unsafe System.Numerics.Vector<int> GatherVectorByteZeroExtendFirstFaulting(System.Numerics.Vector<int> mask, byte* address, System.Numerics.Vector<int> offsets) { throw null; }
+ public static System.Numerics.Vector<int> GatherVectorByteZeroExtendFirstFaulting(System.Numerics.Vector<int> mask, System.Numerics.Vector<uint> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<int> GatherVectorByteZeroExtendFirstFaulting(System.Numerics.Vector<int> mask, byte* address, System.Numerics.Vector<uint> offsets) { throw null; }
+ public static unsafe System.Numerics.Vector<long> GatherVectorByteZeroExtendFirstFaulting(System.Numerics.Vector<long> mask, byte* address, System.Numerics.Vector<long> offsets) { throw null; }
+ public static System.Numerics.Vector<long> GatherVectorByteZeroExtendFirstFaulting(System.Numerics.Vector<long> mask, System.Numerics.Vector<ulong> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<long> GatherVectorByteZeroExtendFirstFaulting(System.Numerics.Vector<long> mask, byte* address, System.Numerics.Vector<ulong> offsets) { throw null; }
+ public static unsafe System.Numerics.Vector<uint> GatherVectorByteZeroExtendFirstFaulting(System.Numerics.Vector<uint> mask, byte* address, System.Numerics.Vector<int> offsets) { throw null; }
+ public static System.Numerics.Vector<uint> GatherVectorByteZeroExtendFirstFaulting(System.Numerics.Vector<uint> mask, System.Numerics.Vector<uint> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<uint> GatherVectorByteZeroExtendFirstFaulting(System.Numerics.Vector<uint> mask, byte* address, System.Numerics.Vector<uint> offsets) { throw null; }
+ public static unsafe System.Numerics.Vector<ulong> GatherVectorByteZeroExtendFirstFaulting(System.Numerics.Vector<ulong> mask, byte* address, System.Numerics.Vector<long> offsets) { throw null; }
+ public static System.Numerics.Vector<ulong> GatherVectorByteZeroExtendFirstFaulting(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<ulong> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<ulong> GatherVectorByteZeroExtendFirstFaulting(System.Numerics.Vector<ulong> mask, byte* address, System.Numerics.Vector<ulong> offsets) { throw null; }
+ public static System.Numerics.Vector<int> GatherVectorFirstFaulting(System.Numerics.Vector<int> mask, System.Numerics.Vector<uint> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<int> GatherVectorFirstFaulting(System.Numerics.Vector<int> mask, int* address, System.Numerics.Vector<int> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<int> GatherVectorFirstFaulting(System.Numerics.Vector<int> mask, int* address, System.Numerics.Vector<uint> indices) { throw null; }
+ public static System.Numerics.Vector<long> GatherVectorFirstFaulting(System.Numerics.Vector<long> mask, System.Numerics.Vector<ulong> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<long> GatherVectorFirstFaulting(System.Numerics.Vector<long> mask, long* address, System.Numerics.Vector<long> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<long> GatherVectorFirstFaulting(System.Numerics.Vector<long> mask, long* address, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static System.Numerics.Vector<uint> GatherVectorFirstFaulting(System.Numerics.Vector<uint> mask, System.Numerics.Vector<uint> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<uint> GatherVectorFirstFaulting(System.Numerics.Vector<uint> mask, uint* address, System.Numerics.Vector<int> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<uint> GatherVectorFirstFaulting(System.Numerics.Vector<uint> mask, uint* address, System.Numerics.Vector<uint> indices) { throw null; }
+ public static System.Numerics.Vector<ulong> GatherVectorFirstFaulting(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<ulong> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<ulong> GatherVectorFirstFaulting(System.Numerics.Vector<ulong> mask, ulong* address, System.Numerics.Vector<long> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<ulong> GatherVectorFirstFaulting(System.Numerics.Vector<ulong> mask, ulong* address, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<float> GatherVectorFirstFaulting(System.Numerics.Vector<float> mask, float* address, System.Numerics.Vector<int> indices) { throw null; }
+ public static System.Numerics.Vector<float> GatherVectorFirstFaulting(System.Numerics.Vector<float> mask, System.Numerics.Vector<uint> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<float> GatherVectorFirstFaulting(System.Numerics.Vector<float> mask, float* address, System.Numerics.Vector<uint> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<double> GatherVectorFirstFaulting(System.Numerics.Vector<double> mask, double* address, System.Numerics.Vector<long> indices) { throw null; }
+ public static System.Numerics.Vector<double> GatherVectorFirstFaulting(System.Numerics.Vector<double> mask, System.Numerics.Vector<ulong> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<double> GatherVectorFirstFaulting(System.Numerics.Vector<double> mask, double* address, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<int> GatherVectorInt16SignExtend(System.Numerics.Vector<int> mask, short* address, System.Numerics.Vector<int> indices) { throw null; }
+ public static System.Numerics.Vector<int> GatherVectorInt16SignExtend(System.Numerics.Vector<int> mask, System.Numerics.Vector<uint> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<int> GatherVectorInt16SignExtend(System.Numerics.Vector<int> mask, short* address, System.Numerics.Vector<uint> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<long> GatherVectorInt16SignExtend(System.Numerics.Vector<long> mask, short* address, System.Numerics.Vector<long> indices) { throw null; }
+ public static System.Numerics.Vector<long> GatherVectorInt16SignExtend(System.Numerics.Vector<long> mask, System.Numerics.Vector<ulong> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<long> GatherVectorInt16SignExtend(System.Numerics.Vector<long> mask, short* address, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<uint> GatherVectorInt16SignExtend(System.Numerics.Vector<uint> mask, short* address, System.Numerics.Vector<int> indices) { throw null; }
+ public static System.Numerics.Vector<uint> GatherVectorInt16SignExtend(System.Numerics.Vector<uint> mask, System.Numerics.Vector<uint> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<uint> GatherVectorInt16SignExtend(System.Numerics.Vector<uint> mask, short* address, System.Numerics.Vector<uint> indices) { throw null; }
+ public static unsafe System.Numerics.Vector<ulong> GatherVectorInt16SignExtend(System.Numerics.Vector<ulong> mask, short* address, System.Numerics.Vector<long> indices) { throw null; }
+ public static System.Numerics.Vector<ulong> GatherVectorInt16SignExtend(System.Numerics.Vector<ulong> mask, System.Numerics.Vector<ulong> addresses) { throw null; }
+ public static unsafe System.Numerics.Vector<ulong> GatherVectorInt16SignExtend(System.Numerics.Vector<ulong> mask, short* address, System.Numerics.Vector<ulong> indices) { throw null; }
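// Sketch (editorial, same assumptions; the GetFfr* name pattern is assumed from the rest
// of this surface): the FirstFaulting gathers only fault on the first active lane; later
// lanes that would fault are recorded in the first-fault register instead, which enables
// speculative loops over data of unknown extent:
//
//     Vector<int> g = Sve.GatherVectorFirstFaulting(mask, p, indices);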
null; } + public static unsafe System.Numerics.Vector GatherVectorInt16SignExtendFirstFaulting(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt16SignExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16SignExtendFirstFaulting(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16SignExtendFirstFaulting(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt16SignExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16SignExtendFirstFaulting(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16SignExtendFirstFaulting(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt16SignExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16SignExtendFirstFaulting(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtend(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtend(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtend(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtend(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtend(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtend(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtend(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtend(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, short* 
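
// ---------------------------------------------------------------------------
// Illustrative sketch: element indices vs. byte offsets. The ...WithByteOffsets...
// gathers take raw byte offsets, so an element index must be pre-scaled by the
// element size; both calls below should then read the same memory. The <int>
// bindings and CreateTrueMaskInt32 are assumptions (type arguments are stripped
// in this dump).
using System.Numerics; using System.Runtime.Intrinsics.Arm;
static unsafe class ByteOffsetSketch
{
    public static void SameMemoryTwoWays(short* address, Vector<int> elementIndices)
    {
        Vector<int> mask = Sve.CreateTrueMaskInt32();
        Vector<int> byIndex  = Sve.GatherVectorInt16SignExtend(mask, address, elementIndices);
        Vector<int> byOffset = Sve.GatherVectorInt16WithByteOffsetsSignExtend(
            mask, address, elementIndices * new Vector<int>(sizeof(short)));
        // byIndex and byOffset are expected to hold identical lanes here.
    }
}
// ---------------------------------------------------------------------------
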
address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt32SignExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt32SignExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt32SignExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt32SignExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt32SignExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector 
GatherVectorInt32SignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt32SignExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt32SignExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt32SignExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtend(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe 
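
// ---------------------------------------------------------------------------
// Illustrative sketch of a first-faulting gather: lanes after the first element
// whose load would fault are left unloaded and the fault is suppressed; the FFR
// (see GetFfr later in this listing) records which lanes completed. The <long>
// bindings and CreateTrueMaskInt64 are assumptions.
using System.Numerics; using System.Runtime.Intrinsics.Arm;
static unsafe class FirstFaultingGatherSketch
{
    public static Vector<long> GatherSpeculative(int* address, Vector<long> indices)
        => Sve.GatherVectorInt32SignExtendFirstFaulting(
               Sve.CreateTrueMaskInt64(), address, indices);
}
// ---------------------------------------------------------------------------
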
System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtend(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorSByteSignExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtend(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtend(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorSByteSignExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtend(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtend(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorSByteSignExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtend(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtend(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorSByteSignExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtend(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets) { throw null; } + public static System.Numerics.Vector GatherVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets) { throw 
null; } + public static System.Numerics.Vector GatherVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets) { throw null; } + public static System.Numerics.Vector GatherVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets) { throw null; } + public static System.Numerics.Vector GatherVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector 
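
// ---------------------------------------------------------------------------
// Illustrative sketch: the zero-extending gathers have the same shape as the
// sign-extending ones above, but fill the upper bits of each widened lane with
// zeros (unsigned payloads). The <uint> bindings and CreateTrueMaskUInt32 are
// assumptions.
using System.Numerics; using System.Runtime.Intrinsics.Arm;
static unsafe class ZeroExtendGatherSketch
{
    public static Vector<uint> GatherUnsigned(ushort* address, Vector<uint> byteOffsets)
        => Sve.GatherVectorUInt16WithByteOffsetsZeroExtend(
               Sve.CreateTrueMaskUInt32(), address, byteOffsets);
}
// ---------------------------------------------------------------------------
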
GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt16ZeroExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt16ZeroExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt16ZeroExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt16ZeroExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtend(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, 
System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector 
GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt32ZeroExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt32ZeroExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt32ZeroExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt32ZeroExtend(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtend(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt32ZeroExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + 
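
// ---------------------------------------------------------------------------
// Illustrative sketch: these gathers come in two shapes - (mask, base, indices),
// which scales each index by the element size, and (mask, addresses), which
// dereferences a vector of absolute 64-bit addresses lane by lane. The <ulong>
// bindings and CreateTrueMaskUInt64 are assumptions.
using System.Numerics; using System.Runtime.Intrinsics.Arm;
static unsafe class GatherShapesSketch
{
    public static Vector<ulong> FromBase(uint* baseAddress, Vector<ulong> indices)
        => Sve.GatherVectorUInt32ZeroExtend(Sve.CreateTrueMaskUInt64(), baseAddress, indices);

    public static Vector<ulong> FromAddresses(Vector<ulong> addresses)
        => Sve.GatherVectorUInt32ZeroExtend(Sve.CreateTrueMaskUInt64(), addresses);
}
// ---------------------------------------------------------------------------
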
public static System.Numerics.Vector GatherVectorUInt32ZeroExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt32ZeroExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt32ZeroExtendFirstFaulting(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsetFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsetFirstFaulting(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsetFirstFaulting(System.Numerics.Vector mask, long* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsetFirstFaulting(System.Numerics.Vector mask, long* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsetFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsetFirstFaulting(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsetFirstFaulting(System.Numerics.Vector mask, ulong* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsetFirstFaulting(System.Numerics.Vector mask, ulong* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsetFirstFaulting(System.Numerics.Vector mask, float* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsetFirstFaulting(System.Numerics.Vector mask, float* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsetFirstFaulting(System.Numerics.Vector mask, double* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsetFirstFaulting(System.Numerics.Vector mask, double* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe 
System.Numerics.Vector GatherVectorWithByteOffsets(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsets(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsets(System.Numerics.Vector mask, long* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsets(System.Numerics.Vector mask, long* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsets(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsets(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsets(System.Numerics.Vector mask, ulong* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsets(System.Numerics.Vector mask, ulong* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsets(System.Numerics.Vector mask, float* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsets(System.Numerics.Vector mask, float* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsets(System.Numerics.Vector mask, double* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorWithByteOffsets(System.Numerics.Vector mask, double* address, System.Numerics.Vector offsets) { throw null; } + public static ulong GetActiveElementCount(System.Numerics.Vector mask, System.Numerics.Vector from) { throw null; } + public static ulong GetActiveElementCount(System.Numerics.Vector mask, System.Numerics.Vector from) { throw null; } + public static ulong GetActiveElementCount(System.Numerics.Vector mask, System.Numerics.Vector from) { throw null; } + public static ulong GetActiveElementCount(System.Numerics.Vector mask, System.Numerics.Vector from) { throw null; } + public static ulong GetActiveElementCount(System.Numerics.Vector mask, System.Numerics.Vector from) { throw null; } + public static ulong GetActiveElementCount(System.Numerics.Vector mask, System.Numerics.Vector from) { throw null; } + public static ulong GetActiveElementCount(System.Numerics.Vector mask, System.Numerics.Vector from) { throw null; } + public static ulong GetActiveElementCount(System.Numerics.Vector mask, System.Numerics.Vector from) { throw null; } + public static ulong GetActiveElementCount(System.Numerics.Vector mask, System.Numerics.Vector from) { throw null; } + public static ulong GetActiveElementCount(System.Numerics.Vector mask, System.Numerics.Vector from) { throw null; } + public static System.Numerics.Vector GetFfr() { throw null; } + public static System.Numerics.Vector GetFfr() { throw null; } + public static System.Numerics.Vector GetFfr() { throw null; } + public static System.Numerics.Vector GetFfr() { throw null; } + public static System.Numerics.Vector GetFfr() { throw null; } + public static System.Numerics.Vector GetFfr() { throw null; } + public static 
System.Numerics.Vector GetFfr() { throw null; } + public static System.Numerics.Vector GetFfr() { throw null; } + public static System.Numerics.Vector InsertIntoShiftedVector(System.Numerics.Vector left, sbyte right) { throw null; } + public static System.Numerics.Vector InsertIntoShiftedVector(System.Numerics.Vector left, short right) { throw null; } + public static System.Numerics.Vector InsertIntoShiftedVector(System.Numerics.Vector left, int right) { throw null; } + public static System.Numerics.Vector InsertIntoShiftedVector(System.Numerics.Vector left, long right) { throw null; } + public static System.Numerics.Vector InsertIntoShiftedVector(System.Numerics.Vector left, byte right) { throw null; } + public static System.Numerics.Vector InsertIntoShiftedVector(System.Numerics.Vector left, ushort right) { throw null; } + public static System.Numerics.Vector InsertIntoShiftedVector(System.Numerics.Vector left, uint right) { throw null; } + public static System.Numerics.Vector InsertIntoShiftedVector(System.Numerics.Vector left, ulong right) { throw null; } + public static System.Numerics.Vector InsertIntoShiftedVector(System.Numerics.Vector left, float right) { throw null; } + public static System.Numerics.Vector InsertIntoShiftedVector(System.Numerics.Vector left, double right) { throw null; } + public static System.Numerics.Vector LeadingSignCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector LeadingSignCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector LeadingSignCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector LeadingSignCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector LeadingZeroCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector LeadingZeroCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector LeadingZeroCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector LeadingZeroCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector LeadingZeroCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector LeadingZeroCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector LeadingZeroCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector LeadingZeroCount(System.Numerics.Vector value) { throw null; } + public static unsafe System.Numerics.Vector LoadVector(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector(System.Numerics.Vector mask, int* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector(System.Numerics.Vector mask, long* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector(System.Numerics.Vector mask, uint* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector(System.Numerics.Vector mask, ulong* address) { throw null; } + public static unsafe System.Numerics.Vector 
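
// ---------------------------------------------------------------------------
// Illustrative sketch of the FFR workflow: reset the FFR, perform a
// first-faulting load (listed further below), then read the FFR back and count
// how many lanes completed. GetFfr is spelled as in this dump, where the
// overloads differ only by return type, so the <int> binding below is purely
// illustrative; SetFfr, CreateTrueMaskInt32 and LoadVectorFirstFaulting are
// assumed from elsewhere in this surface.
using System.Numerics; using System.Runtime.Intrinsics.Arm;
static unsafe class FfrSketch
{
    public static ulong CountLoadableLanes(int* address)
    {
        Vector<int> all = Sve.CreateTrueMaskInt32();
        Sve.SetFfr(all);                                    // assumed FFR reset
        Vector<int> data = Sve.LoadVectorFirstFaulting(all, address);
        Vector<int> ffr  = Sve.GetFfr();                    // lanes that completed
        return Sve.GetActiveElementCount(ffr, ffr);
    }
}
// ---------------------------------------------------------------------------
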
LoadVector(System.Numerics.Vector mask, float* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector(System.Numerics.Vector mask, double* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector128AndReplicateToVector(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector128AndReplicateToVector(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector128AndReplicateToVector(System.Numerics.Vector mask, int* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector128AndReplicateToVector(System.Numerics.Vector mask, long* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector128AndReplicateToVector(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector128AndReplicateToVector(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector128AndReplicateToVector(System.Numerics.Vector mask, uint* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector128AndReplicateToVector(System.Numerics.Vector mask, ulong* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector128AndReplicateToVector(System.Numerics.Vector mask, float* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector128AndReplicateToVector(System.Numerics.Vector mask, double* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteNonFaultingZeroExtendToInt16(byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteNonFaultingZeroExtendToInt32(byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteNonFaultingZeroExtendToInt64(byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteNonFaultingZeroExtendToUInt16(byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteNonFaultingZeroExtendToUInt32(byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteNonFaultingZeroExtendToUInt64(byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteZeroExtendFirstFaulting(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteZeroExtendFirstFaulting(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteZeroExtendFirstFaulting(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteZeroExtendFirstFaulting(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteZeroExtendFirstFaulting(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteZeroExtendFirstFaulting(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteZeroExtendToInt16(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteZeroExtendToInt32(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector 
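
// ---------------------------------------------------------------------------
// Illustrative sketch: LoadVector is the ordinary predicated load (inactive
// lanes come back zeroed), while LoadVector128AndReplicateToVector loads one
// 128-bit block and repeats it across the full scalable width. The <float>
// bindings and CreateTrueMaskSingle are assumptions.
using System.Numerics; using System.Runtime.Intrinsics.Arm;
static unsafe class LoadSketch
{
    public static Vector<float> LoadAndBroadcast(float* row, float* quad)
    {
        Vector<float> mask = Sve.CreateTrueMaskSingle();
        Vector<float> data = Sve.LoadVector(mask, row);
        Vector<float> rep  = Sve.LoadVector128AndReplicateToVector(mask, quad);
        return data + rep;
    }
}
// ---------------------------------------------------------------------------
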
LoadVectorByteZeroExtendToInt64(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteZeroExtendToUInt16(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteZeroExtendToUInt32(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorByteZeroExtendToUInt64(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorFirstFaulting(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorFirstFaulting(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorFirstFaulting(System.Numerics.Vector mask, int* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorFirstFaulting(System.Numerics.Vector mask, long* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorFirstFaulting(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorFirstFaulting(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorFirstFaulting(System.Numerics.Vector mask, uint* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorFirstFaulting(System.Numerics.Vector mask, ulong* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorFirstFaulting(System.Numerics.Vector mask, float* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorFirstFaulting(System.Numerics.Vector mask, double* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt16NonFaultingSignExtendToInt32(short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt16NonFaultingSignExtendToInt64(short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt16NonFaultingSignExtendToUInt32(short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt16NonFaultingSignExtendToUInt64(short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt16SignExtendFirstFaulting(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt16SignExtendFirstFaulting(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt16SignExtendFirstFaulting(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt16SignExtendFirstFaulting(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt16SignExtendToInt32(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt16SignExtendToInt64(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt16SignExtendToUInt32(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt16SignExtendToUInt64(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt32NonFaultingSignExtendToInt64(int* 
address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt32NonFaultingSignExtendToUInt64(int* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt32SignExtendFirstFaulting(System.Numerics.Vector mask, int* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt32SignExtendFirstFaulting(System.Numerics.Vector mask, int* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt32SignExtendToInt64(System.Numerics.Vector mask, int* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorInt32SignExtendToUInt64(System.Numerics.Vector mask, int* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonFaulting(sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonFaulting(short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonFaulting(int* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonFaulting(long* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonFaulting(byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonFaulting(ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonFaulting(uint* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonFaulting(ulong* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonFaulting(float* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonFaulting(double* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonTemporal(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonTemporal(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonTemporal(System.Numerics.Vector mask, int* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonTemporal(System.Numerics.Vector mask, long* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonTemporal(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonTemporal(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonTemporal(System.Numerics.Vector mask, uint* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonTemporal(System.Numerics.Vector mask, ulong* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonTemporal(System.Numerics.Vector mask, float* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonTemporal(System.Numerics.Vector mask, double* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteNonFaultingSignExtendToInt16(sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteNonFaultingSignExtendToInt32(sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteNonFaultingSignExtendToInt64(sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteNonFaultingSignExtendToUInt16(sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector 
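
// ---------------------------------------------------------------------------
// Illustrative sketch: LoadVectorNonFaulting takes no mask and suppresses
// faults entirely (lanes that could not be read must be detected via the FFR),
// whereas LoadVectorNonTemporal is a normal predicated load that merely hints
// the data will not be reused from cache. The <int> bindings are assumptions.
using System.Numerics; using System.Runtime.Intrinsics.Arm;
static unsafe class NonFaultingLoadSketch
{
    public static Vector<int> Probe(int* maybePastEnd, int* streaming)
    {
        Vector<int> nf = Sve.LoadVectorNonFaulting(maybePastEnd);
        Vector<int> nt = Sve.LoadVectorNonTemporal(Sve.CreateTrueMaskInt32(), streaming);
        return nf + nt;
    }
}
// ---------------------------------------------------------------------------
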
LoadVectorSByteNonFaultingSignExtendToUInt32(sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteNonFaultingSignExtendToUInt64(sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteSignExtendFirstFaulting(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteSignExtendToInt16(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteSignExtendToInt32(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteSignExtendToInt64(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteSignExtendToUInt16(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteSignExtendToUInt32(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorSByteSignExtendToUInt64(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt16NonFaultingZeroExtendToInt32(ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt16NonFaultingZeroExtendToInt64(ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt16NonFaultingZeroExtendToUInt32(ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt16NonFaultingZeroExtendToUInt64(ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt16ZeroExtendFirstFaulting(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt16ZeroExtendToInt32(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt16ZeroExtendToInt64(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt16ZeroExtendToUInt32(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt16ZeroExtendToUInt64(System.Numerics.Vector mask, ushort* 
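
// ---------------------------------------------------------------------------
// Illustrative sketch: the ...SignExtendTo.../...ZeroExtendTo... loads widen
// while loading, so packed narrow data lands directly in wider lanes without a
// separate unpack pass. The <int> binding and CreateTrueMaskInt32 are
// assumptions.
using System.Numerics; using System.Runtime.Intrinsics.Arm;
static unsafe class WideningLoadSketch
{
    public static Vector<int> WidenOnLoad(sbyte* packed)
        => Sve.LoadVectorSByteSignExtendToInt32(Sve.CreateTrueMaskInt32(), packed);
}
// ---------------------------------------------------------------------------
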
address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt32NonFaultingZeroExtendToInt64(uint* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt32NonFaultingZeroExtendToUInt64(uint* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt32ZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt32ZeroExtendFirstFaulting(System.Numerics.Vector mask, uint* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt32ZeroExtendToInt64(System.Numerics.Vector mask, uint* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorUInt32ZeroExtendToUInt64(System.Numerics.Vector mask, uint* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector) LoadVectorx2(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector) LoadVectorx2(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector) LoadVectorx2(System.Numerics.Vector mask, int* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector) LoadVectorx2(System.Numerics.Vector mask, long* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector) LoadVectorx2(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector) LoadVectorx2(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector) LoadVectorx2(System.Numerics.Vector mask, uint* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector) LoadVectorx2(System.Numerics.Vector mask, ulong* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector) LoadVectorx2(System.Numerics.Vector mask, float* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector) LoadVectorx2(System.Numerics.Vector mask, double* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx3(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx3(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx3(System.Numerics.Vector mask, int* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx3(System.Numerics.Vector mask, long* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx3(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx3(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx3(System.Numerics.Vector mask, uint* address) { throw null; } + public static unsafe (System.Numerics.Vector, 
System.Numerics.Vector, System.Numerics.Vector) LoadVectorx3(System.Numerics.Vector mask, ulong* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx3(System.Numerics.Vector mask, float* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx3(System.Numerics.Vector mask, double* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx4(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx4(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx4(System.Numerics.Vector mask, int* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx4(System.Numerics.Vector mask, long* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx4(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx4(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx4(System.Numerics.Vector mask, uint* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx4(System.Numerics.Vector mask, ulong* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx4(System.Numerics.Vector mask, float* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx4(System.Numerics.Vector mask, double* address) { throw null; } + public static System.Numerics.Vector Max(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Max(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Max(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Max(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Max(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Max(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Max(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Max(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Max(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Max(System.Numerics.Vector left, 
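
// ---------------------------------------------------------------------------
// Illustrative sketch: LoadVectorx2/x3/x4 return 2-4 vectors as a tuple.
// Presumably these map to the LD2/LD3/LD4 structure loads, de-interleaving
// array-of-structures data into one vector per field; that mapping, the <float>
// bindings and CreateTrueMaskSingle are assumptions.
using System.Numerics; using System.Runtime.Intrinsics.Arm;
static unsafe class StructureLoadSketch
{
    public static (Vector<float> X, Vector<float> Y) SplitXY(float* interleavedXY)
        => Sve.LoadVectorx2(Sve.CreateTrueMaskSingle(), interleavedXY);
}
// ---------------------------------------------------------------------------
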
System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MaxAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MaxAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MaxAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MaxAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MaxAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MaxAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MaxAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MaxAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MaxAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MaxNumber(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxNumber(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxNumberAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MaxNumberAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Min(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Min(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Min(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Min(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Min(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Min(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Min(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Min(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Min(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Min(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MinAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MinAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MinAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MinAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MinAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MinAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MinAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MinAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MinAcross(System.Numerics.Vector 
value) { throw null; } + public static System.Numerics.Vector MinNumber(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinNumber(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinNumberAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MinNumberAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Multiply(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Multiply(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Multiply(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Multiply(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Multiply(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Multiply(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Multiply(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Multiply(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Multiply(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Multiply(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplex(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplex(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplexBySelectedScalar(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, 
[ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyBySelectedScalar(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplyBySelectedScalar(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplyExtended(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyExtended(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplySubtract(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplySubtract(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplySubtract(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplySubtract(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplySubtract(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplySubtract(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplySubtract(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplySubtract(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Negate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Negate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Negate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Negate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Negate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Negate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Not(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Not(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Not(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Not(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Not(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Not(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Not(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Not(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Or(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Or(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector 
Or(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Or(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Or(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Or(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Or(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Or(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector OrAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector OrAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector OrAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector OrAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector OrAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector OrAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector OrAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector OrAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector OrNot(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector OrNot(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector OrNot(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector OrNot(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector OrNot(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector OrNot(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector OrNot(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector OrNot(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector PopCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector PopCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector PopCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector PopCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector PopCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector PopCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector PopCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector PopCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector PopCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector PopCount(System.Numerics.Vector value) { throw null; } + public static unsafe void PrefetchBytes(System.Numerics.Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType) { throw null; } + public static unsafe void PrefetchInt16(System.Numerics.Vector mask, void* address, 
[ConstantExpected] SvePrefetchType prefetchType) { throw null; } + public static unsafe void PrefetchInt32(System.Numerics.Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType) { throw null; } + public static unsafe void PrefetchInt64(System.Numerics.Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType) { throw null; } + public static System.Numerics.Vector ReciprocalEstimate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReciprocalEstimate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReciprocalExponent(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReciprocalExponent(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReciprocalSqrtEstimate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReciprocalSqrtEstimate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReciprocalSqrtStep(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ReciprocalSqrtStep(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ReciprocalStep(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ReciprocalStep(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ReverseBits(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseBits(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseBits(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseBits(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseBits(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseBits(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseBits(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseBits(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement16(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement16(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector 
ReverseElement16(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement16(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement32(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement32(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement8(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement8(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement8(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement8(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement8(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement8(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundAwayFromZero(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundAwayFromZero(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundToNearest(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundToNearest(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundToNegativeInfinity(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundToNegativeInfinity(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundToPositiveInfinity(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundToPositiveInfinity(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundToZero(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundToZero(System.Numerics.Vector value) { throw null; } + public static int SaturatingDecrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static long SaturatingDecrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static uint SaturatingDecrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static ulong SaturatingDecrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static System.Numerics.Vector SaturatingDecrementBy16BitElementCount(System.Numerics.Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static System.Numerics.Vector SaturatingDecrementBy16BitElementCount(System.Numerics.Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static int SaturatingDecrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static long SaturatingDecrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public 
static uint SaturatingDecrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static ulong SaturatingDecrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static System.Numerics.Vector SaturatingDecrementBy32BitElementCount(System.Numerics.Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static System.Numerics.Vector SaturatingDecrementBy32BitElementCount(System.Numerics.Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static int SaturatingDecrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static long SaturatingDecrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static uint SaturatingDecrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static ulong SaturatingDecrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static System.Numerics.Vector SaturatingDecrementBy64BitElementCount(System.Numerics.Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static System.Numerics.Vector SaturatingDecrementBy64BitElementCount(System.Numerics.Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static int SaturatingDecrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static long SaturatingDecrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static uint SaturatingDecrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static ulong SaturatingDecrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static System.Numerics.Vector SaturatingDecrementByActiveElementCount(System.Numerics.Vector value, System.Numerics.Vector from) { throw null; } + public static System.Numerics.Vector SaturatingDecrementByActiveElementCount(System.Numerics.Vector value, System.Numerics.Vector from) { throw null; } + public static System.Numerics.Vector SaturatingDecrementByActiveElementCount(System.Numerics.Vector value, System.Numerics.Vector from) { throw null; } + public static int SaturatingDecrementByActiveElementCount(int value, System.Numerics.Vector from) { throw null; } + public static long SaturatingDecrementByActiveElementCount(long value, System.Numerics.Vector from) { throw null; } + public static uint SaturatingDecrementByActiveElementCount(uint value, System.Numerics.Vector from) { throw null; } + public static ulong 
SaturatingDecrementByActiveElementCount(ulong value, System.Numerics.Vector from) { throw null; } + public static int SaturatingDecrementByActiveElementCount(int value, System.Numerics.Vector from) { throw null; } + public static long SaturatingDecrementByActiveElementCount(long value, System.Numerics.Vector from) { throw null; } + public static uint SaturatingDecrementByActiveElementCount(uint value, System.Numerics.Vector from) { throw null; } + public static ulong SaturatingDecrementByActiveElementCount(ulong value, System.Numerics.Vector from) { throw null; } + public static System.Numerics.Vector SaturatingDecrementByActiveElementCount(System.Numerics.Vector value, System.Numerics.Vector from) { throw null; } + public static int SaturatingDecrementByActiveElementCount(int value, System.Numerics.Vector from) { throw null; } + public static long SaturatingDecrementByActiveElementCount(long value, System.Numerics.Vector from) { throw null; } + public static uint SaturatingDecrementByActiveElementCount(uint value, System.Numerics.Vector from) { throw null; } + public static ulong SaturatingDecrementByActiveElementCount(ulong value, System.Numerics.Vector from) { throw null; } + public static System.Numerics.Vector SaturatingDecrementByActiveElementCount(System.Numerics.Vector value, System.Numerics.Vector from) { throw null; } + public static int SaturatingDecrementByActiveElementCount(int value, System.Numerics.Vector from) { throw null; } + public static long SaturatingDecrementByActiveElementCount(long value, System.Numerics.Vector from) { throw null; } + public static uint SaturatingDecrementByActiveElementCount(uint value, System.Numerics.Vector from) { throw null; } + public static ulong SaturatingDecrementByActiveElementCount(ulong value, System.Numerics.Vector from) { throw null; } + public static System.Numerics.Vector SaturatingDecrementByActiveElementCount(System.Numerics.Vector value, System.Numerics.Vector from) { throw null; } + public static int SaturatingIncrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static long SaturatingIncrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static uint SaturatingIncrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static ulong SaturatingIncrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static System.Numerics.Vector SaturatingIncrementBy16BitElementCount(System.Numerics.Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static System.Numerics.Vector SaturatingIncrementBy16BitElementCount(System.Numerics.Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static int SaturatingIncrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static long SaturatingIncrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static uint 
SaturatingIncrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static ulong SaturatingIncrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static System.Numerics.Vector SaturatingIncrementBy32BitElementCount(System.Numerics.Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static System.Numerics.Vector SaturatingIncrementBy32BitElementCount(System.Numerics.Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static int SaturatingIncrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static long SaturatingIncrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static uint SaturatingIncrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static ulong SaturatingIncrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static System.Numerics.Vector SaturatingIncrementBy64BitElementCount(System.Numerics.Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static System.Numerics.Vector SaturatingIncrementBy64BitElementCount(System.Numerics.Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static int SaturatingIncrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static long SaturatingIncrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static uint SaturatingIncrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static ulong SaturatingIncrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static System.Numerics.Vector SaturatingIncrementByActiveElementCount(System.Numerics.Vector value, System.Numerics.Vector from) { throw null; } + public static System.Numerics.Vector SaturatingIncrementByActiveElementCount(System.Numerics.Vector value, System.Numerics.Vector from) { throw null; } + public static System.Numerics.Vector SaturatingIncrementByActiveElementCount(System.Numerics.Vector value, System.Numerics.Vector from) { throw null; } + public static int SaturatingIncrementByActiveElementCount(int value, System.Numerics.Vector from) { throw null; } + public static long SaturatingIncrementByActiveElementCount(long value, System.Numerics.Vector from) { throw null; } + public static uint SaturatingIncrementByActiveElementCount(uint value, System.Numerics.Vector from) { throw null; } + public static ulong 
SaturatingIncrementByActiveElementCount(ulong value, System.Numerics.Vector from) { throw null; } + public static int SaturatingIncrementByActiveElementCount(int value, System.Numerics.Vector from) { throw null; } + public static long SaturatingIncrementByActiveElementCount(long value, System.Numerics.Vector from) { throw null; } + public static uint SaturatingIncrementByActiveElementCount(uint value, System.Numerics.Vector from) { throw null; } + public static ulong SaturatingIncrementByActiveElementCount(ulong value, System.Numerics.Vector from) { throw null; } + public static System.Numerics.Vector SaturatingIncrementByActiveElementCount(System.Numerics.Vector value, System.Numerics.Vector from) { throw null; } + public static int SaturatingIncrementByActiveElementCount(int value, System.Numerics.Vector from) { throw null; } + public static long SaturatingIncrementByActiveElementCount(long value, System.Numerics.Vector from) { throw null; } + public static uint SaturatingIncrementByActiveElementCount(uint value, System.Numerics.Vector from) { throw null; } + public static ulong SaturatingIncrementByActiveElementCount(ulong value, System.Numerics.Vector from) { throw null; } + public static System.Numerics.Vector SaturatingIncrementByActiveElementCount(System.Numerics.Vector value, System.Numerics.Vector from) { throw null; } + public static int SaturatingIncrementByActiveElementCount(int value, System.Numerics.Vector from) { throw null; } + public static long SaturatingIncrementByActiveElementCount(long value, System.Numerics.Vector from) { throw null; } + public static uint SaturatingIncrementByActiveElementCount(uint value, System.Numerics.Vector from) { throw null; } + public static ulong SaturatingIncrementByActiveElementCount(ulong value, System.Numerics.Vector from) { throw null; } + public static System.Numerics.Vector SaturatingIncrementByActiveElementCount(System.Numerics.Vector value, System.Numerics.Vector from) { throw null; } + public static System.Numerics.Vector Scale(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Scale(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static unsafe void Scatter(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static void Scatter(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter(System.Numerics.Vector mask, long* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static void Scatter(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter(System.Numerics.Vector mask, long* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static void Scatter(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices, System.Numerics.Vector
data) { throw null; } + public static unsafe void Scatter(System.Numerics.Vector mask, ulong* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static void Scatter(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter(System.Numerics.Vector mask, ulong* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter(System.Numerics.Vector mask, float* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static void Scatter(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter(System.Numerics.Vector mask, float* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter(System.Numerics.Vector mask, double* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static void Scatter(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter(System.Numerics.Vector mask, double* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static void Scatter16BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static void Scatter16BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static void Scatter16BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static void Scatter16BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void
Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static void Scatter32BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static void Scatter32BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static void Scatter8BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static void Scatter8BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw 
null; } + public static void Scatter8BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static void Scatter8BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, byte* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, byte* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, byte* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, byte* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static void SetFfr(System.Numerics.Vector value) { throw null; } + public static void SetFfr(System.Numerics.Vector value) { throw null; } + public static void SetFfr(System.Numerics.Vector value) { throw null; } + public static void SetFfr(System.Numerics.Vector value) { throw null; } + public static void SetFfr(System.Numerics.Vector value) { throw null; } + public static void SetFfr(System.Numerics.Vector value) { throw null; } + public static void SetFfr(System.Numerics.Vector value) { throw null; } + public static void SetFfr(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ShiftLeftLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftLeftLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftLeftLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftLeftLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftLeftLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftLeftLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftLeftLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftLeftLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftLeftLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw 
null; } + public static System.Numerics.Vector ShiftLeftLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftLeftLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftLeftLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftLeftLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftLeftLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftRightArithmetic(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftRightArithmetic(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftRightArithmetic(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftRightArithmetic(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftRightArithmetic(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftRightArithmetic(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftRightArithmetic(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticForDivide(System.Numerics.Vector value, [ConstantExpected] byte control) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticForDivide(System.Numerics.Vector value, [ConstantExpected] byte control) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticForDivide(System.Numerics.Vector value, [ConstantExpected] byte control) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticForDivide(System.Numerics.Vector value, [ConstantExpected] byte control) { throw null; } + public static System.Numerics.Vector ShiftRightLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftRightLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftRightLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftRightLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftRightLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftRightLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ShiftRightLogical(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SignExtend16(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SignExtend16(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SignExtend32(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SignExtend8(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector 
SignExtend8(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SignExtend8(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SignExtendWideningLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SignExtendWideningLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SignExtendWideningLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SignExtendWideningUpper(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SignExtendWideningUpper(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SignExtendWideningUpper(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Splice(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Splice(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Splice(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Splice(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Splice(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Splice(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Splice(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Splice(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Splice(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Splice(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Sqrt(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Sqrt(System.Numerics.Vector value) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, sbyte* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, sbyte* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, sbyte* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3, System.Numerics.Vector Value4) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, short* address, System.Numerics.Vector data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, short* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, 
short* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, short* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3, System.Numerics.Vector Value4) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, int* address, System.Numerics.Vector data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, int* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, int* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, int* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3, System.Numerics.Vector Value4) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, long* address, System.Numerics.Vector data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, long* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, long* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, long* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3, System.Numerics.Vector Value4) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, byte* address, System.Numerics.Vector data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, byte* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, byte* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, byte* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3, System.Numerics.Vector Value4) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, ushort* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, ushort* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, ushort* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3, System.Numerics.Vector Value4) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, uint* address, System.Numerics.Vector data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, uint* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, uint* address, 
(System.Numerics.Vector<uint> Value1, System.Numerics.Vector<uint> Value2, System.Numerics.Vector<uint> Value3) data) { throw null; }
+ public static unsafe void Store(System.Numerics.Vector<uint> mask, uint* address, (System.Numerics.Vector<uint> Value1, System.Numerics.Vector<uint> Value2, System.Numerics.Vector<uint> Value3, System.Numerics.Vector<uint> Value4) data) { throw null; }
+ public static unsafe void Store(System.Numerics.Vector<ulong> mask, ulong* address, System.Numerics.Vector<ulong> data) { throw null; }
+ public static unsafe void Store(System.Numerics.Vector<ulong> mask, ulong* address, (System.Numerics.Vector<ulong> Value1, System.Numerics.Vector<ulong> Value2) data) { throw null; }
+ public static unsafe void Store(System.Numerics.Vector<ulong> mask, ulong* address, (System.Numerics.Vector<ulong> Value1, System.Numerics.Vector<ulong> Value2, System.Numerics.Vector<ulong> Value3) data) { throw null; }
+ public static unsafe void Store(System.Numerics.Vector<ulong> mask, ulong* address, (System.Numerics.Vector<ulong> Value1, System.Numerics.Vector<ulong> Value2, System.Numerics.Vector<ulong> Value3, System.Numerics.Vector<ulong> Value4) data) { throw null; }
+ public static unsafe void Store(System.Numerics.Vector<float> mask, float* address, System.Numerics.Vector<float> data) { throw null; }
+ public static unsafe void Store(System.Numerics.Vector<float> mask, float* address, (System.Numerics.Vector<float> Value1, System.Numerics.Vector<float> Value2) data) { throw null; }
+ public static unsafe void Store(System.Numerics.Vector<float> mask, float* address, (System.Numerics.Vector<float> Value1, System.Numerics.Vector<float> Value2, System.Numerics.Vector<float> Value3) data) { throw null; }
+ public static unsafe void Store(System.Numerics.Vector<float> mask, float* address, (System.Numerics.Vector<float> Value1, System.Numerics.Vector<float> Value2, System.Numerics.Vector<float> Value3, System.Numerics.Vector<float> Value4) data) { throw null; }
+ public static unsafe void Store(System.Numerics.Vector<double> mask, double* address, System.Numerics.Vector<double> data) { throw null; }
+ public static unsafe void Store(System.Numerics.Vector<double> mask, double* address, (System.Numerics.Vector<double> Value1, System.Numerics.Vector<double> Value2) data) { throw null; }
+ public static unsafe void Store(System.Numerics.Vector<double> mask, double* address, (System.Numerics.Vector<double> Value1, System.Numerics.Vector<double> Value2, System.Numerics.Vector<double> Value3) data) { throw null; }
+ public static unsafe void Store(System.Numerics.Vector<double> mask, double* address, (System.Numerics.Vector<double> Value1, System.Numerics.Vector<double> Value2, System.Numerics.Vector<double> Value3, System.Numerics.Vector<double> Value4) data) { throw null; }
+ public static unsafe void StoreNarrowing(System.Numerics.Vector<short> mask, sbyte* address, System.Numerics.Vector<short> data) { throw null; }
+ public static unsafe void StoreNarrowing(System.Numerics.Vector<int> mask, sbyte* address, System.Numerics.Vector<int> data) { throw null; }
+ public static unsafe void StoreNarrowing(System.Numerics.Vector<int> mask, short* address, System.Numerics.Vector<int> data) { throw null; }
+ public static unsafe void StoreNarrowing(System.Numerics.Vector<long> mask, sbyte* address, System.Numerics.Vector<long> data) { throw null; }
+ public static unsafe void StoreNarrowing(System.Numerics.Vector<long> mask, short* address, System.Numerics.Vector<long> data) { throw null; }
+ public static unsafe void StoreNarrowing(System.Numerics.Vector<long> mask, int* address, System.Numerics.Vector<long> data) { throw null; }
+ public static unsafe void StoreNarrowing(System.Numerics.Vector<ushort> mask, byte* address, System.Numerics.Vector<ushort> data) { throw null; }
+ public static unsafe void StoreNarrowing(System.Numerics.Vector<uint> mask, byte* address, System.Numerics.Vector<uint> data) { throw null; }
+ public static unsafe void StoreNarrowing(System.Numerics.Vector<uint> mask, ushort* address, System.Numerics.Vector<uint> data) { throw null; }
+ public static unsafe void StoreNarrowing(System.Numerics.Vector<ulong> mask, byte* address, System.Numerics.Vector<ulong> data) { throw null; }
+ public static unsafe void StoreNarrowing(System.Numerics.Vector<ulong> mask, ushort* address, System.Numerics.Vector<ulong> data) { throw null; }
+ public static unsafe void StoreNarrowing(System.Numerics.Vector<ulong> mask, uint* address, System.Numerics.Vector<ulong> data) { throw null; }
+ public static unsafe void StoreNonTemporal(System.Numerics.Vector<sbyte> mask, sbyte* address, System.Numerics.Vector<sbyte> data) { throw null; }
+ public static unsafe void StoreNonTemporal(System.Numerics.Vector<short> mask, short* address, System.Numerics.Vector<short> data) { throw null; }
+ public static unsafe void StoreNonTemporal(System.Numerics.Vector<int> mask, int* address, System.Numerics.Vector<int> data) { throw null; }
+ public static unsafe void StoreNonTemporal(System.Numerics.Vector<long> mask, long* address, System.Numerics.Vector<long> data) { throw null; }
+ public static unsafe void StoreNonTemporal(System.Numerics.Vector<byte> mask, byte* address, System.Numerics.Vector<byte> data) { throw null; }
+ public static unsafe void StoreNonTemporal(System.Numerics.Vector<ushort> mask, ushort* address, System.Numerics.Vector<ushort> data) { throw null; }
+ public static unsafe void StoreNonTemporal(System.Numerics.Vector<uint> mask, uint* address, System.Numerics.Vector<uint> data) { throw null; }
+ public static unsafe void StoreNonTemporal(System.Numerics.Vector<ulong> mask, ulong* address, System.Numerics.Vector<ulong> data) { throw null; }
+ public static unsafe void StoreNonTemporal(System.Numerics.Vector<float> mask, float* address, System.Numerics.Vector<float> data) { throw null; }
+ public static unsafe void StoreNonTemporal(System.Numerics.Vector<double> mask, double* address, System.Numerics.Vector<double> data) { throw null; }
+ public static System.Numerics.Vector<sbyte> Subtract(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> Subtract(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> Subtract(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> Subtract(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> Subtract(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> Subtract(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> Subtract(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> Subtract(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> Subtract(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> Subtract(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
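+ // Editor's sketch (illustrative, not generated output): Store writes only the
+ // lanes selected by `mask`; composing it with Subtract gives a predicated
+ // left[i] - right[i] store. `mask`, `address`, `left` and `right` are
+ // hypothetical locals:
+ //
+ //   System.Numerics.Vector<int> diff = Sve.Subtract(left, right);
+ //   Sve.Store(mask, address, diff);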
+ public static System.Numerics.Vector<sbyte> SubtractSaturate(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> SubtractSaturate(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> SubtractSaturate(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> SubtractSaturate(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> SubtractSaturate(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> SubtractSaturate(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> SubtractSaturate(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> SubtractSaturate(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static bool TestAnyTrue(System.Numerics.Vector<sbyte> leftMask, System.Numerics.Vector<sbyte> rightMask) { throw null; }
+ public static bool TestAnyTrue(System.Numerics.Vector<short> leftMask, System.Numerics.Vector<short> rightMask) { throw null; }
+ public static bool TestAnyTrue(System.Numerics.Vector<int> leftMask, System.Numerics.Vector<int> rightMask) { throw null; }
+ public static bool TestAnyTrue(System.Numerics.Vector<long> leftMask, System.Numerics.Vector<long> rightMask) { throw null; }
+ public static bool TestAnyTrue(System.Numerics.Vector<byte> leftMask, System.Numerics.Vector<byte> rightMask) { throw null; }
+ public static bool TestAnyTrue(System.Numerics.Vector<ushort> leftMask, System.Numerics.Vector<ushort> rightMask) { throw null; }
+ public static bool TestAnyTrue(System.Numerics.Vector<uint> leftMask, System.Numerics.Vector<uint> rightMask) { throw null; }
+ public static bool TestAnyTrue(System.Numerics.Vector<ulong> leftMask, System.Numerics.Vector<ulong> rightMask) { throw null; }
+ public static bool TestFirstTrue(System.Numerics.Vector<sbyte> leftMask, System.Numerics.Vector<sbyte> rightMask) { throw null; }
+ public static bool TestFirstTrue(System.Numerics.Vector<short> leftMask, System.Numerics.Vector<short> rightMask) { throw null; }
+ public static bool TestFirstTrue(System.Numerics.Vector<int> leftMask, System.Numerics.Vector<int> rightMask) { throw null; }
+ public static bool TestFirstTrue(System.Numerics.Vector<long> leftMask, System.Numerics.Vector<long> rightMask) { throw null; }
+ public static bool TestFirstTrue(System.Numerics.Vector<byte> leftMask, System.Numerics.Vector<byte> rightMask) { throw null; }
+ public static bool TestFirstTrue(System.Numerics.Vector<ushort> leftMask, System.Numerics.Vector<ushort> rightMask) { throw null; }
+ public static bool TestFirstTrue(System.Numerics.Vector<uint> leftMask, System.Numerics.Vector<uint> rightMask) { throw null; }
+ public static bool TestFirstTrue(System.Numerics.Vector<ulong> leftMask, System.Numerics.Vector<ulong> rightMask) { throw null; }
+ public static bool TestLastTrue(System.Numerics.Vector<sbyte> leftMask, System.Numerics.Vector<sbyte> rightMask) { throw null; }
+ public static bool TestLastTrue(System.Numerics.Vector<short> leftMask, System.Numerics.Vector<short> rightMask) { throw null; }
+ public static bool TestLastTrue(System.Numerics.Vector<int> leftMask, System.Numerics.Vector<int> rightMask) { throw null; }
+ public static bool TestLastTrue(System.Numerics.Vector<long> leftMask, System.Numerics.Vector<long> rightMask) { throw null; }
+ public static bool TestLastTrue(System.Numerics.Vector<byte> leftMask, System.Numerics.Vector<byte> rightMask) { throw null; }
+ public static bool TestLastTrue(System.Numerics.Vector<ushort> leftMask, System.Numerics.Vector<ushort> rightMask) { throw null; }
+ public static bool TestLastTrue(System.Numerics.Vector<uint> leftMask, System.Numerics.Vector<uint> rightMask) { throw null; }
+ public static bool TestLastTrue(System.Numerics.Vector<ulong> leftMask, System.Numerics.Vector<ulong> rightMask) { throw null; }
+ public static System.Numerics.Vector<sbyte> TransposeEven(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> TransposeEven(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> TransposeEven(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> TransposeEven(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> TransposeEven(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> TransposeEven(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> TransposeEven(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> TransposeEven(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> TransposeEven(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> TransposeEven(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> TransposeOdd(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> TransposeOdd(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> TransposeOdd(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> TransposeOdd(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> TransposeOdd(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> TransposeOdd(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> TransposeOdd(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> TransposeOdd(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> TransposeOdd(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> TransposeOdd(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<float> TrigonometricMultiplyAddCoefficient(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right, [ConstantExpected] byte control) { throw null; }
+ public static System.Numerics.Vector<double> TrigonometricMultiplyAddCoefficient(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right, [ConstantExpected] byte control) { throw null; }
+ public static System.Numerics.Vector<float> TrigonometricSelectCoefficient(System.Numerics.Vector<float> value, System.Numerics.Vector<uint> selector) { throw null; }
+ public static System.Numerics.Vector<double> TrigonometricSelectCoefficient(System.Numerics.Vector<double> value, System.Numerics.Vector<ulong> selector) { throw null; }
+ public static System.Numerics.Vector<float> TrigonometricStartingValue(System.Numerics.Vector<float> value, System.Numerics.Vector<uint> sign) { throw null; }
+ public static System.Numerics.Vector<double> TrigonometricStartingValue(System.Numerics.Vector<double> value, System.Numerics.Vector<ulong> sign) { throw null; }
+ public static System.Numerics.Vector<sbyte> UnzipEven(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> UnzipEven(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> UnzipEven(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> UnzipEven(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> UnzipEven(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> UnzipEven(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> UnzipEven(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> UnzipEven(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> UnzipEven(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> UnzipEven(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> UnzipOdd(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> UnzipOdd(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> UnzipOdd(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> UnzipOdd(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> UnzipOdd(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> UnzipOdd(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> UnzipOdd(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> UnzipOdd(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> UnzipOdd(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> UnzipOdd(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> VectorTableLookup(System.Numerics.Vector<sbyte> data, System.Numerics.Vector<byte> indices) { throw null; }
+ public static System.Numerics.Vector<short> VectorTableLookup(System.Numerics.Vector<short> data, System.Numerics.Vector<ushort> indices) { throw null; }
+ public static System.Numerics.Vector<int> VectorTableLookup(System.Numerics.Vector<int> data, System.Numerics.Vector<uint> indices) { throw null; }
+ public static System.Numerics.Vector<long> VectorTableLookup(System.Numerics.Vector<long> data, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static System.Numerics.Vector<byte> VectorTableLookup(System.Numerics.Vector<byte> data, System.Numerics.Vector<byte> indices) { throw null; }
+ public static System.Numerics.Vector<ushort> VectorTableLookup(System.Numerics.Vector<ushort> data, System.Numerics.Vector<ushort> indices) { throw null; }
+ public static System.Numerics.Vector<uint> VectorTableLookup(System.Numerics.Vector<uint> data, System.Numerics.Vector<uint> indices) { throw null; }
+ public static System.Numerics.Vector<ulong> VectorTableLookup(System.Numerics.Vector<ulong> data, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static System.Numerics.Vector<float> VectorTableLookup(System.Numerics.Vector<float> data, System.Numerics.Vector<uint> indices) { throw null; }
+ public static System.Numerics.Vector<double> VectorTableLookup(System.Numerics.Vector<double> data, System.Numerics.Vector<ulong> indices) { throw null; }
+ public static System.Numerics.Vector<sbyte> Xor(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> Xor(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> Xor(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> Xor(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> Xor(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> Xor(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> Xor(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> Xor(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> XorAcross(System.Numerics.Vector<sbyte> value) { throw null; }
+ public static System.Numerics.Vector<short> XorAcross(System.Numerics.Vector<short> value) { throw null; }
+ public static System.Numerics.Vector<int> XorAcross(System.Numerics.Vector<int> value) { throw null; }
+ public static System.Numerics.Vector<long> XorAcross(System.Numerics.Vector<long> value) { throw null; }
+ public static System.Numerics.Vector<byte> XorAcross(System.Numerics.Vector<byte> value) { throw null; }
+ public static System.Numerics.Vector<ushort> XorAcross(System.Numerics.Vector<ushort> value) { throw null; }
+ public static System.Numerics.Vector<uint> XorAcross(System.Numerics.Vector<uint> value) { throw null; }
+ public static System.Numerics.Vector<ulong> XorAcross(System.Numerics.Vector<ulong> value) { throw null; }
+ public static System.Numerics.Vector<uint> ZeroExtend16(System.Numerics.Vector<uint> value) { throw null; }
+ public static System.Numerics.Vector<ulong> ZeroExtend16(System.Numerics.Vector<ulong> value) { throw null; }
+ public static System.Numerics.Vector<ulong> ZeroExtend32(System.Numerics.Vector<ulong> value) { throw null; }
+ public static System.Numerics.Vector<ushort> ZeroExtend8(System.Numerics.Vector<ushort> value) { throw null; }
+ public static System.Numerics.Vector<uint> ZeroExtend8(System.Numerics.Vector<uint> value) { throw null; }
+ public static System.Numerics.Vector<ulong> ZeroExtend8(System.Numerics.Vector<ulong> value) { throw null; }
+ public static System.Numerics.Vector<ushort> ZeroExtendWideningLower(System.Numerics.Vector<byte> value) { throw null; }
+ public static System.Numerics.Vector<uint> ZeroExtendWideningLower(System.Numerics.Vector<ushort> value) { throw null; }
+ public static System.Numerics.Vector<ulong> ZeroExtendWideningLower(System.Numerics.Vector<uint> value) { throw null; }
+ public static System.Numerics.Vector<ushort> ZeroExtendWideningUpper(System.Numerics.Vector<byte> value) { throw null; }
+ public static System.Numerics.Vector<uint> ZeroExtendWideningUpper(System.Numerics.Vector<ushort> value) { throw null; }
+ public static System.Numerics.Vector<ulong> ZeroExtendWideningUpper(System.Numerics.Vector<uint> value) { throw null; }
+ public static System.Numerics.Vector<sbyte> ZipHigh(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> ZipHigh(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> ZipHigh(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> ZipHigh(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> ZipHigh(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> ZipHigh(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> ZipHigh(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> ZipHigh(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> ZipHigh(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> ZipHigh(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+ public static System.Numerics.Vector<sbyte> ZipLow(System.Numerics.Vector<sbyte> left, System.Numerics.Vector<sbyte> right) { throw null; }
+ public static System.Numerics.Vector<short> ZipLow(System.Numerics.Vector<short> left, System.Numerics.Vector<short> right) { throw null; }
+ public static System.Numerics.Vector<int> ZipLow(System.Numerics.Vector<int> left, System.Numerics.Vector<int> right) { throw null; }
+ public static System.Numerics.Vector<long> ZipLow(System.Numerics.Vector<long> left, System.Numerics.Vector<long> right) { throw null; }
+ public static System.Numerics.Vector<byte> ZipLow(System.Numerics.Vector<byte> left, System.Numerics.Vector<byte> right) { throw null; }
+ public static System.Numerics.Vector<ushort> ZipLow(System.Numerics.Vector<ushort> left, System.Numerics.Vector<ushort> right) { throw null; }
+ public static System.Numerics.Vector<uint> ZipLow(System.Numerics.Vector<uint> left, System.Numerics.Vector<uint> right) { throw null; }
+ public static System.Numerics.Vector<ulong> ZipLow(System.Numerics.Vector<ulong> left, System.Numerics.Vector<ulong> right) { throw null; }
+ public static System.Numerics.Vector<float> ZipLow(System.Numerics.Vector<float> left, System.Numerics.Vector<float> right) { throw null; }
+ public static System.Numerics.Vector<double> ZipLow(System.Numerics.Vector<double> left, System.Numerics.Vector<double> right) { throw null; }
+
diff --git a/sve_api/out_cs_api/Sve.cs b/sve_api/out_cs_api/Sve.cs
new file mode 100644
index 0000000000000..cb3fb35f9ad16
--- /dev/null
+++ b/sve_api/out_cs_api/Sve.cs
@@ -0,0 +1,9649 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics.CodeAnalysis;
+using System.Runtime.CompilerServices;
+using System.Runtime.Intrinsics;
+using System.Numerics;
+
+namespace System.Runtime.Intrinsics.Arm
+{
+ /// <summary>
+ /// This class provides access to the ARM SVE hardware instructions via intrinsics
+ /// </summary>
+ [Intrinsic]
+ [CLSCompliant(false)]
+ public abstract class Sve : AdvSimd
+ {
+ internal Sve() { }
+
+ public static new bool IsSupported { get => IsSupported; }
+
+ [Intrinsic]
+ public new abstract class Arm64 : AdvSimd.Arm64
+ {
+ internal Arm64() { }
+
+ public static new bool IsSupported { get => IsSupported; }
+ }
+
+ /// Abs : Absolute value
+
+ /// <summary>
+ /// svint8_t svabs[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op)
+ /// svint8_t svabs[_s8]_x(svbool_t pg, svint8_t op)
+ /// svint8_t svabs[_s8]_z(svbool_t pg, svint8_t op)
+ /// </summary>
+ public static unsafe Vector<sbyte> Abs(Vector<sbyte> value) => Abs(value);
+
+ /// <summary>
+ /// svint16_t svabs[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ /// svint16_t svabs[_s16]_x(svbool_t pg, svint16_t op)
+ /// svint16_t svabs[_s16]_z(svbool_t pg, svint16_t op)
+ /// </summary>
+ public static unsafe Vector<short> Abs(Vector<short> value) => Abs(value);
+
+ /// <summary>
+ /// svint32_t svabs[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ /// svint32_t svabs[_s32]_x(svbool_t pg, svint32_t op)
+ /// svint32_t svabs[_s32]_z(svbool_t pg, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<int> Abs(Vector<int> value) => Abs(value);
+
+ /// <summary>
+ /// svint64_t svabs[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ /// svint64_t svabs[_s64]_x(svbool_t pg, svint64_t op)
+ /// svint64_t svabs[_s64]_z(svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> Abs(Vector<long> value) => Abs(value);
+
+ /// <summary>
+ /// svfloat32_t svabs[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svabs[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// svfloat32_t svabs[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<float> Abs(Vector<float> value) => Abs(value);
+
+ /// <summary>
+ /// svfloat64_t svabs[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svabs[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// svfloat64_t svabs[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<double> Abs(Vector<double> value) => Abs(value);
+
+ /// AbsoluteCompareGreaterThan : Absolute compare greater than
+
+ /// <summary>
+ /// svbool_t svacgt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> AbsoluteCompareGreaterThan(Vector<float> left, Vector<float> right) => AbsoluteCompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svacgt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> AbsoluteCompareGreaterThan(Vector<double> left, Vector<double> right) => AbsoluteCompareGreaterThan(left, right);
+
+ /// AbsoluteCompareGreaterThanOrEqual : Absolute compare greater than or equal to
+
+ /// <summary>
+ /// svbool_t svacge[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> AbsoluteCompareGreaterThanOrEqual(Vector<float> left, Vector<float> right) => AbsoluteCompareGreaterThanOrEqual(left, right);
+
+ /// <summary>
+ /// svbool_t svacge[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> AbsoluteCompareGreaterThanOrEqual(Vector<double> left, Vector<double> right) => AbsoluteCompareGreaterThanOrEqual(left, right);
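+
+ // Editor's sketch (illustrative, not generated output): the absolute compares
+ // above test |left| against |right| directly, so no explicit Abs is needed.
+ // `x` and `bound` are hypothetical Vector<float> locals:
+ //
+ //   Vector<float> mask = AbsoluteCompareGreaterThan(x, bound);
+ //
+ // behaves like CompareGreaterThan(Abs(x), Abs(bound)) in a single instruction.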
+
+ /// AbsoluteCompareLessThan : Absolute compare less than
+
+ /// <summary>
+ /// svbool_t svaclt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> AbsoluteCompareLessThan(Vector<float> left, Vector<float> right) => AbsoluteCompareLessThan(left, right);
+
+ /// <summary>
+ /// svbool_t svaclt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> AbsoluteCompareLessThan(Vector<double> left, Vector<double> right) => AbsoluteCompareLessThan(left, right);
+
+ /// AbsoluteCompareLessThanOrEqual : Absolute compare less than or equal to
+
+ /// <summary>
+ /// svbool_t svacle[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> AbsoluteCompareLessThanOrEqual(Vector<float> left, Vector<float> right) => AbsoluteCompareLessThanOrEqual(left, right);
+
+ /// <summary>
+ /// svbool_t svacle[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> AbsoluteCompareLessThanOrEqual(Vector<double> left, Vector<double> right) => AbsoluteCompareLessThanOrEqual(left, right);
+
+ /// AbsoluteDifference : Absolute difference
+
+ /// <summary>
+ /// svint8_t svabd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svabd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svabd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> AbsoluteDifference(Vector<sbyte> left, Vector<sbyte> right) => AbsoluteDifference(left, right);
+
+ /// <summary>
+ /// svint16_t svabd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svabd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svabd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> AbsoluteDifference(Vector<short> left, Vector<short> right) => AbsoluteDifference(left, right);
+
+ /// <summary>
+ /// svint32_t svabd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svabd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svabd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> AbsoluteDifference(Vector<int> left, Vector<int> right) => AbsoluteDifference(left, right);
+
+ /// <summary>
+ /// svint64_t svabd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svabd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svabd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> AbsoluteDifference(Vector<long> left, Vector<long> right) => AbsoluteDifference(left, right);
+
+ /// <summary>
+ /// svuint8_t svabd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svabd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svabd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> AbsoluteDifference(Vector<byte> left, Vector<byte> right) => AbsoluteDifference(left, right);
+
+ /// <summary>
+ /// svuint16_t svabd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svabd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svabd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> AbsoluteDifference(Vector<ushort> left, Vector<ushort> right) => AbsoluteDifference(left, right);
+
+ /// <summary>
+ /// svuint32_t svabd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svabd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svabd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> AbsoluteDifference(Vector<uint> left, Vector<uint> right) => AbsoluteDifference(left, right);
+
+ /// <summary>
+ /// svuint64_t svabd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svabd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svabd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> AbsoluteDifference(Vector<ulong> left, Vector<ulong> right) => AbsoluteDifference(left, right);
+
+ /// <summary>
+ /// svfloat32_t svabd[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// svfloat32_t svabd[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// svfloat32_t svabd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> AbsoluteDifference(Vector<float> left, Vector<float> right) => AbsoluteDifference(left, right);
+
+ /// <summary>
+ /// svfloat64_t svabd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// svfloat64_t svabd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// svfloat64_t svabd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> AbsoluteDifference(Vector<double> left, Vector<double> right) => AbsoluteDifference(left, right);
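+
+ // Editor's sketch (illustrative, not generated output): svabd computes
+ // |left - right| per lane without intermediate wrap-around, which a manual
+ // Subtract cannot do for unsigned types; e.g. for byte lanes 3 and 250,
+ //
+ //   AbsoluteDifference(left, right)   // lane result: 247
+ //
+ // whereas Subtract(left, right) would wrap to 9 modulo 256.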
+
+ /// Add : Add
+
+ /// <summary>
+ /// svint8_t svadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> Add(Vector<sbyte> left, Vector<sbyte> right) => Add(left, right);
+
+ /// <summary>
+ /// svint16_t svadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> Add(Vector<short> left, Vector<short> right) => Add(left, right);
+
+ /// <summary>
+ /// svint32_t svadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> Add(Vector<int> left, Vector<int> right) => Add(left, right);
+
+ /// <summary>
+ /// svint64_t svadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> Add(Vector<long> left, Vector<long> right) => Add(left, right);
+
+ /// <summary>
+ /// svuint8_t svadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> Add(Vector<byte> left, Vector<byte> right) => Add(left, right);
+
+ /// <summary>
+ /// svuint16_t svadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> Add(Vector<ushort> left, Vector<ushort> right) => Add(left, right);
+
+ /// <summary>
+ /// svuint32_t svadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> Add(Vector<uint> left, Vector<uint> right) => Add(left, right);
+
+ /// <summary>
+ /// svuint64_t svadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> Add(Vector<ulong> left, Vector<ulong> right) => Add(left, right);
+
+ /// <summary>
+ /// svfloat32_t svadd[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// svfloat32_t svadd[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// svfloat32_t svadd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> Add(Vector<float> left, Vector<float> right) => Add(left, right);
+
+ /// <summary>
+ /// svfloat64_t svadd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// svfloat64_t svadd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// svfloat64_t svadd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> Add(Vector<double> left, Vector<double> right) => Add(left, right);
+
+ /// AddAcross : Add reduction
+
+ /// <summary>
+ /// int64_t svaddv[_s8](svbool_t pg, svint8_t op)
+ /// </summary>
+ public static unsafe Vector<long> AddAcross(Vector<sbyte> value) => AddAcross(value);
+
+ /// <summary>
+ /// int64_t svaddv[_s16](svbool_t pg, svint16_t op)
+ /// </summary>
+ public static unsafe Vector<long> AddAcross(Vector<short> value) => AddAcross(value);
+
+ /// <summary>
+ /// int64_t svaddv[_s32](svbool_t pg, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<long> AddAcross(Vector<int> value) => AddAcross(value);
+
+ /// <summary>
+ /// int64_t svaddv[_s64](svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> AddAcross(Vector<long> value) => AddAcross(value);
+
+ /// <summary>
+ /// uint64_t svaddv[_u8](svbool_t pg, svuint8_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> AddAcross(Vector<byte> value) => AddAcross(value);
+
+ /// <summary>
+ /// uint64_t svaddv[_u16](svbool_t pg, svuint16_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> AddAcross(Vector<ushort> value) => AddAcross(value);
+
+ /// <summary>
+ /// uint64_t svaddv[_u32](svbool_t pg, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> AddAcross(Vector<uint> value) => AddAcross(value);
+
+ /// <summary>
+ /// uint64_t svaddv[_u64](svbool_t pg, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> AddAcross(Vector<ulong> value) => AddAcross(value);
+
+ /// <summary>
+ /// float32_t svaddv[_f32](svbool_t pg, svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<float> AddAcross(Vector<float> value) => AddAcross(value);
+
+ /// <summary>
+ /// float64_t svaddv[_f64](svbool_t pg, svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<double> AddAcross(Vector<double> value) => AddAcross(value);
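+
+ // Editor's sketch (illustrative, not generated output): AddAcross reduces the
+ // active lanes into element 0 of a widened result, so a horizontal sum of a
+ // hypothetical Vector<int> `values` reads back as:
+ //
+ //   Vector<long> total = AddAcross(values);
+ //   long sum = total[0];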
+
+ /// AddRotateComplex : Complex add with rotate
+
+ /// <summary>
+ /// svfloat32_t svcadd[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, uint64_t imm_rotation)
+ /// svfloat32_t svcadd[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, uint64_t imm_rotation)
+ /// svfloat32_t svcadd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<float> AddRotateComplex(Vector<float> left, Vector<float> right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation);
+
+ /// <summary>
+ /// svfloat64_t svcadd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation)
+ /// svfloat64_t svcadd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation)
+ /// svfloat64_t svcadd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<double> AddRotateComplex(Vector<double> left, Vector<double> right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation);
+
+ /// AddSaturate : Saturating add
+
+ /// <summary>
+ /// svint8_t svqadd[_s8](svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> AddSaturate(Vector<sbyte> left, Vector<sbyte> right) => AddSaturate(left, right);
+
+ /// <summary>
+ /// svint16_t svqadd[_s16](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> AddSaturate(Vector<short> left, Vector<short> right) => AddSaturate(left, right);
+
+ /// <summary>
+ /// svint32_t svqadd[_s32](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> AddSaturate(Vector<int> left, Vector<int> right) => AddSaturate(left, right);
+
+ /// <summary>
+ /// svint64_t svqadd[_s64](svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> AddSaturate(Vector<long> left, Vector<long> right) => AddSaturate(left, right);
+
+ /// <summary>
+ /// svuint8_t svqadd[_u8](svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> AddSaturate(Vector<byte> left, Vector<byte> right) => AddSaturate(left, right);
+
+ /// <summary>
+ /// svuint16_t svqadd[_u16](svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> AddSaturate(Vector<ushort> left, Vector<ushort> right) => AddSaturate(left, right);
+
+ /// <summary>
+ /// svuint32_t svqadd[_u32](svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> AddSaturate(Vector<uint> left, Vector<uint> right) => AddSaturate(left, right);
+
+ /// <summary>
+ /// svuint64_t svqadd[_u64](svuint64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> AddSaturate(Vector<ulong> left, Vector<ulong> right) => AddSaturate(left, right);
+
+ /// AddSequentialAcross : Add reduction (strictly-ordered)
+
+ /// <summary>
+ /// float32_t svadda[_f32](svbool_t pg, float32_t initial, svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<float> AddSequentialAcross(Vector<float> initial, Vector<float> value) => AddSequentialAcross(initial, value);
+
+ /// <summary>
+ /// float64_t svadda[_f64](svbool_t pg, float64_t initial, svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<double> AddSequentialAcross(Vector<double> initial, Vector<double> value) => AddSequentialAcross(initial, value);
+
+ /// And : Bitwise AND
+
+ /// <summary>
+ /// svint8_t svand[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svand[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svand[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> And(Vector<sbyte> left, Vector<sbyte> right) => And(left, right);
+
+ /// <summary>
+ /// svint16_t svand[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svand[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svand[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<short> And(Vector<short> left, Vector<short> right) => And(left, right);
+
+ /// <summary>
+ /// svint32_t svand[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svand[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svand[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<int> And(Vector<int> left, Vector<int> right) => And(left, right);
+
+ /// <summary>
+ /// svint64_t svand[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svand[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svand[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<long> And(Vector<long> left, Vector<long> right) => And(left, right);
+
+ /// <summary>
+ /// svuint8_t svand[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svand[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svand[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> And(Vector<byte> left, Vector<byte> right) => And(left, right);
+
+ /// <summary>
+ /// svuint16_t svand[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svand[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svand[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> And(Vector<ushort> left, Vector<ushort> right) => And(left, right);
+
+ /// <summary>
+ /// svuint32_t svand[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svand[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svand[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> And(Vector<uint> left, Vector<uint> right) => And(left, right);
+
+ /// <summary>
+ /// svuint64_t svand[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svand[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svand[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> And(Vector<ulong> left, Vector<ulong> right) => And(left, right);
+
+ /// AndAcross : Bitwise AND reduction to scalar
+
+ /// <summary>
+ /// int8_t svandv[_s8](svbool_t pg, svint8_t op)
+ /// </summary>
+ public static unsafe Vector<sbyte> AndAcross(Vector<sbyte> value) => AndAcross(value);
+
+ /// <summary>
+ /// int16_t svandv[_s16](svbool_t pg, svint16_t op)
+ /// </summary>
+ public static unsafe Vector<short> AndAcross(Vector<short> value) => AndAcross(value);
+
+ /// <summary>
+ /// int32_t svandv[_s32](svbool_t pg, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<int> AndAcross(Vector<int> value) => AndAcross(value);
+
+ /// <summary>
+ /// int64_t svandv[_s64](svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> AndAcross(Vector<long> value) => AndAcross(value);
+
+ /// <summary>
+ /// uint8_t svandv[_u8](svbool_t pg, svuint8_t op)
+ /// </summary>
+ public static unsafe Vector<byte> AndAcross(Vector<byte> value) => AndAcross(value);
+
+ /// <summary>
+ /// uint16_t svandv[_u16](svbool_t pg, svuint16_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> AndAcross(Vector<ushort> value) => AndAcross(value);
+
+ /// <summary>
+ /// uint32_t svandv[_u32](svbool_t pg, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> AndAcross(Vector<uint> value) => AndAcross(value);
+
+ /// <summary>
+ /// uint64_t svandv[_u64](svbool_t pg, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> AndAcross(Vector<ulong> value) => AndAcross(value);
+
+ /// AndNot : Bitwise NAND
+
+ /// <summary>
+ /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> AndNot(Vector<sbyte> left, Vector<sbyte> right) => AndNot(left, right);
+
+ /// <summary>
+ /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<short> AndNot(Vector<short> left, Vector<short> right) => AndNot(left, right);
+
+ /// <summary>
+ /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<int> AndNot(Vector<int> left, Vector<int> right) => AndNot(left, right);
+
+ /// <summary>
+ /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<long> AndNot(Vector<long> left, Vector<long> right) => AndNot(left, right);
+
+ /// <summary>
+ /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> AndNot(Vector<byte> left, Vector<byte> right) => AndNot(left, right);
+
+ /// <summary>
+ /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> AndNot(Vector<ushort> left, Vector<ushort> right) => AndNot(left, right);
+
+ /// <summary>
+ /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> AndNot(Vector<uint> left, Vector<uint> right) => AndNot(left, right);
+
+ /// <summary>
+ /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> AndNot(Vector<ulong> left, Vector<ulong> right) => AndNot(left, right);
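+
+ // Editor's note (illustrative): despite the similar names, AndNot above is the
+ // predicate NAND operation, ~(left & right), while BitwiseClear below is BIC,
+ // left & ~right. For byte lanes left = 0b1100 and right = 0b1010:
+ //
+ //   BitwiseClear(left, right)   // lane result: 0b0100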
+
+ /// BitwiseClear : Bitwise clear
+
+ /// <summary>
+ /// svint8_t svbic[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svbic[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svbic[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> BitwiseClear(Vector<sbyte> left, Vector<sbyte> right) => BitwiseClear(left, right);
+
+ /// <summary>
+ /// svint16_t svbic[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svbic[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svbic[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<short> BitwiseClear(Vector<short> left, Vector<short> right) => BitwiseClear(left, right);
+
+ /// <summary>
+ /// svint32_t svbic[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svbic[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svbic[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<int> BitwiseClear(Vector<int> left, Vector<int> right) => BitwiseClear(left, right);
+
+ /// <summary>
+ /// svint64_t svbic[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svbic[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svbic[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<long> BitwiseClear(Vector<long> left, Vector<long> right) => BitwiseClear(left, right);
+
+ /// <summary>
+ /// svuint8_t svbic[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svbic[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svbic[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> BitwiseClear(Vector<byte> left, Vector<byte> right) => BitwiseClear(left, right);
+
+ /// <summary>
+ /// svuint16_t svbic[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svbic[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svbic[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> BitwiseClear(Vector<ushort> left, Vector<ushort> right) => BitwiseClear(left, right);
+
+ /// <summary>
+ /// svuint32_t svbic[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svbic[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svbic[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> BitwiseClear(Vector<uint> left, Vector<uint> right) => BitwiseClear(left, right);
+
+ /// <summary>
+ /// svuint64_t svbic[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svbic[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svbic[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> BitwiseClear(Vector<ulong> left, Vector<ulong> right) => BitwiseClear(left, right);
+
+ /// BooleanNot : Logically invert boolean condition
+
+ /// <summary>
+ /// svint8_t svcnot[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op)
+ /// svint8_t svcnot[_s8]_x(svbool_t pg, svint8_t op)
+ /// svint8_t svcnot[_s8]_z(svbool_t pg, svint8_t op)
+ /// </summary>
+ public static unsafe Vector<sbyte> BooleanNot(Vector<sbyte> value) => BooleanNot(value);
+
+ /// <summary>
+ /// svint16_t svcnot[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ /// svint16_t svcnot[_s16]_x(svbool_t pg, svint16_t op)
+ /// svint16_t svcnot[_s16]_z(svbool_t pg, svint16_t op)
+ /// </summary>
+ public static unsafe Vector<short> BooleanNot(Vector<short> value) => BooleanNot(value);
+
+ /// <summary>
+ /// svint32_t svcnot[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ /// svint32_t svcnot[_s32]_x(svbool_t pg, svint32_t op)
+ /// svint32_t svcnot[_s32]_z(svbool_t pg, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<int> BooleanNot(Vector<int> value) => BooleanNot(value);
+
+ /// <summary>
+ /// svint64_t svcnot[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ /// svint64_t svcnot[_s64]_x(svbool_t pg, svint64_t op)
+ /// svint64_t svcnot[_s64]_z(svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> BooleanNot(Vector<long> value) => BooleanNot(value);
+
+ /// <summary>
+ /// svuint8_t svcnot[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op)
+ /// svuint8_t svcnot[_u8]_x(svbool_t pg, svuint8_t op)
+ /// svuint8_t svcnot[_u8]_z(svbool_t pg, svuint8_t op)
+ /// </summary>
+ public static unsafe Vector<byte> BooleanNot(Vector<byte> value) => BooleanNot(value);
+
+ /// <summary>
+ /// svuint16_t svcnot[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+ /// svuint16_t svcnot[_u16]_x(svbool_t pg, svuint16_t op)
+ /// svuint16_t svcnot[_u16]_z(svbool_t pg, svuint16_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> BooleanNot(Vector<ushort> value) => BooleanNot(value);
+
+ /// <summary>
+ /// svuint32_t svcnot[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ /// svuint32_t svcnot[_u32]_x(svbool_t pg, svuint32_t op)
+ /// svuint32_t svcnot[_u32]_z(svbool_t pg, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> BooleanNot(Vector<uint> value) => BooleanNot(value);
+
+ /// <summary>
+ /// svuint64_t svcnot[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+ /// svuint64_t svcnot[_u64]_x(svbool_t pg, svuint64_t op)
+ /// svuint64_t svcnot[_u64]_z(svbool_t pg, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> BooleanNot(Vector<ulong> value) => BooleanNot(value);
+
+ /// Compact : Shuffle active elements of vector to the right and fill with zero
+
+ /// <summary>
+ /// svint32_t svcompact[_s32](svbool_t pg, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<int> Compact(Vector<int> mask, Vector<int> value) => Compact(mask, value);
+
+ /// <summary>
+ /// svint64_t svcompact[_s64](svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> Compact(Vector<long> mask, Vector<long> value) => Compact(mask, value);
+
+ /// <summary>
+ /// svuint32_t svcompact[_u32](svbool_t pg, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> Compact(Vector<uint> mask, Vector<uint> value) => Compact(mask, value);
+
+ /// <summary>
+ /// svuint64_t svcompact[_u64](svbool_t pg, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<ulong> Compact(Vector<ulong> mask, Vector<ulong> value) => Compact(mask, value);
+
+ /// <summary>
+ /// svfloat32_t svcompact[_f32](svbool_t pg, svfloat32_t op)
+ /// </summary>
+ public static unsafe Vector<float> Compact(Vector<float> mask, Vector<float> value) => Compact(mask, value);
+
+ /// <summary>
+ /// svfloat64_t svcompact[_f64](svbool_t pg, svfloat64_t op)
+ /// </summary>
+ public static unsafe Vector<double> Compact(Vector<double> mask, Vector<double> value) => Compact(mask, value);
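+
+ // Editor's sketch (illustrative, not generated output): Compact packs the
+ // active lanes to the front and zero-fills the remainder, so filtering the
+ // positive lanes of a hypothetical Vector<int> `values` looks like:
+ //
+ //   Vector<int> mask = CompareGreaterThan(values, Vector<int>.Zero);
+ //   Vector<int> packed = Compact(mask, values);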
+
+ /// CompareEqual : Compare equal to
+
+ /// <summary>
+ /// svbool_t svcmpeq[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> CompareEqual(Vector<sbyte> left, Vector<sbyte> right) => CompareEqual(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpeq_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> CompareEqual(Vector<sbyte> left, Vector<long> right) => CompareEqual(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpeq[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> CompareEqual(Vector<short> left, Vector<short> right) => CompareEqual(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpeq_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<short> CompareEqual(Vector<short> left, Vector<long> right) => CompareEqual(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpeq[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> CompareEqual(Vector<int> left, Vector<int> right) => CompareEqual(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpeq_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<int> CompareEqual(Vector<int> left, Vector<long> right) => CompareEqual(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpeq[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> CompareEqual(Vector<long> left, Vector<long> right) => CompareEqual(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpeq[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> CompareEqual(Vector<byte> left, Vector<byte> right) => CompareEqual(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpeq[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> CompareEqual(Vector<ushort> left, Vector<ushort> right) => CompareEqual(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpeq[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> CompareEqual(Vector<uint> left, Vector<uint> right) => CompareEqual(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpeq[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> CompareEqual(Vector<ulong> left, Vector<ulong> right) => CompareEqual(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpeq[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> CompareEqual(Vector<float> left, Vector<float> right) => CompareEqual(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpeq[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> CompareEqual(Vector<double> left, Vector<double> right) => CompareEqual(left, right);
+
+ /// CompareGreaterThan : Compare greater than
+
+ /// <summary>
+ /// svbool_t svcmpgt[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> CompareGreaterThan(Vector<sbyte> left, Vector<sbyte> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> CompareGreaterThan(Vector<sbyte> left, Vector<long> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> CompareGreaterThan(Vector<short> left, Vector<short> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<short> CompareGreaterThan(Vector<short> left, Vector<long> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> CompareGreaterThan(Vector<int> left, Vector<int> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<int> CompareGreaterThan(Vector<int> left, Vector<long> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> CompareGreaterThan(Vector<long> left, Vector<long> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> CompareGreaterThan(Vector<byte> left, Vector<byte> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> CompareGreaterThan(Vector<byte> left, Vector<ulong> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> CompareGreaterThan(Vector<ushort> left, Vector<ushort> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> CompareGreaterThan(Vector<ushort> left, Vector<ulong> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> CompareGreaterThan(Vector<uint> left, Vector<uint> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> CompareGreaterThan(Vector<uint> left, Vector<ulong> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> CompareGreaterThan(Vector<ulong> left, Vector<ulong> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// </summary>
+ public static unsafe Vector<float> CompareGreaterThan(Vector<float> left, Vector<float> right) => CompareGreaterThan(left, right);
+
+ /// <summary>
+ /// svbool_t svcmpgt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// </summary>
+ public static unsafe Vector<double> CompareGreaterThan(Vector<double> left, Vector<double> right) => CompareGreaterThan(left, right);
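+
+ // Editor's note (illustrative): the _wide overloads above compare each narrow
+ // lane of `left` against the 64-bit lane of `right` that overlaps it, e.g.
+ //
+ //   Vector<sbyte> mask = CompareGreaterThan(bytes, longs);   // bytes: Vector<sbyte>, longs: Vector<long>
+ //
+ // Only the same-type overloads require matching element widths.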
CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + + /// CompareLessThan : Compare less than + + /// + /// svbool_t svcmplt[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt[_s32](svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt[_s64](svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2) + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector CompareLessThan(Vector left, Vector 
+
+
+        /// CompareLessThan : Compare less than
+
+        /// <summary>
+        /// svbool_t svcmplt[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> CompareLessThan(Vector<sbyte> left, Vector<sbyte> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> CompareLessThan(Vector<sbyte> left, Vector<long> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+        /// </summary>
+        public static unsafe Vector<short> CompareLessThan(Vector<short> left, Vector<short> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<short> CompareLessThan(Vector<short> left, Vector<long> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+        /// </summary>
+        public static unsafe Vector<int> CompareLessThan(Vector<int> left, Vector<int> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<int> CompareLessThan(Vector<int> left, Vector<long> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<long> CompareLessThan(Vector<long> left, Vector<long> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CompareLessThan(Vector<byte> left, Vector<byte> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CompareLessThan(Vector<byte> left, Vector<ulong> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CompareLessThan(Vector<ushort> left, Vector<ushort> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CompareLessThan(Vector<ushort> left, Vector<ulong> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CompareLessThan(Vector<uint> left, Vector<uint> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CompareLessThan(Vector<uint> left, Vector<ulong> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> CompareLessThan(Vector<ulong> left, Vector<ulong> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        /// </summary>
+        public static unsafe Vector<float> CompareLessThan(Vector<float> left, Vector<float> right) => CompareLessThan(left, right);
+
+        /// <summary>
+        /// svbool_t svcmplt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        /// </summary>
+        public static unsafe Vector<double> CompareLessThan(Vector<double> left, Vector<double> right) => CompareLessThan(left, right);
+
+
+        /// CompareLessThanOrEqual : Compare less than or equal to
+
+        /// <summary>
+        /// svbool_t svcmple[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> CompareLessThanOrEqual(Vector<sbyte> left, Vector<sbyte> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> CompareLessThanOrEqual(Vector<sbyte> left, Vector<long> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+        /// </summary>
+        public static unsafe Vector<short> CompareLessThanOrEqual(Vector<short> left, Vector<short> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<short> CompareLessThanOrEqual(Vector<short> left, Vector<long> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+        /// </summary>
+        public static unsafe Vector<int> CompareLessThanOrEqual(Vector<int> left, Vector<int> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<int> CompareLessThanOrEqual(Vector<int> left, Vector<long> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<long> CompareLessThanOrEqual(Vector<long> left, Vector<long> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CompareLessThanOrEqual(Vector<byte> left, Vector<byte> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CompareLessThanOrEqual(Vector<byte> left, Vector<ulong> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CompareLessThanOrEqual(Vector<ushort> left, Vector<ushort> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CompareLessThanOrEqual(Vector<ushort> left, Vector<ulong> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CompareLessThanOrEqual(Vector<uint> left, Vector<uint> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CompareLessThanOrEqual(Vector<uint> left, Vector<ulong> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> CompareLessThanOrEqual(Vector<ulong> left, Vector<ulong> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        /// </summary>
+        public static unsafe Vector<float> CompareLessThanOrEqual(Vector<float> left, Vector<float> right) => CompareLessThanOrEqual(left, right);
+
+        /// <summary>
+        /// svbool_t svcmple[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        /// </summary>
+        public static unsafe Vector<double> CompareLessThanOrEqual(Vector<double> left, Vector<double> right) => CompareLessThanOrEqual(left, right);
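+
+        // --- Editor's note: an illustrative sketch of the `_wide` comparison forms,
+        // which compare each narrow lane directly against 64-bit elements and so avoid
+        // an explicit widening conversion. `NarrowLanesAtMostSketch` is a hypothetical
+        // helper name, not part of the generated API.
+        internal static Vector<sbyte> NarrowLanesAtMostSketch(Vector<sbyte> values, Vector<long> threshold) =>
+            CompareLessThanOrEqual(values, threshold);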
+
+
+        /// CompareNotEqualTo : Compare not equal to
+
+        /// <summary>
+        /// svbool_t svcmpne[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> CompareNotEqualTo(Vector<sbyte> left, Vector<sbyte> right) => CompareNotEqualTo(left, right);
+
+        /// <summary>
+        /// svbool_t svcmpne_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> CompareNotEqualTo(Vector<sbyte> left, Vector<long> right) => CompareNotEqualTo(left, right);
+
+        /// <summary>
+        /// svbool_t svcmpne[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+        /// </summary>
+        public static unsafe Vector<short> CompareNotEqualTo(Vector<short> left, Vector<short> right) => CompareNotEqualTo(left, right);
+
+        /// <summary>
+        /// svbool_t svcmpne_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<short> CompareNotEqualTo(Vector<short> left, Vector<long> right) => CompareNotEqualTo(left, right);
+
+        /// <summary>
+        /// svbool_t svcmpne[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+        /// </summary>
+        public static unsafe Vector<int> CompareNotEqualTo(Vector<int> left, Vector<int> right) => CompareNotEqualTo(left, right);
+
+        /// <summary>
+        /// svbool_t svcmpne_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<int> CompareNotEqualTo(Vector<int> left, Vector<long> right) => CompareNotEqualTo(left, right);
+
+        /// <summary>
+        /// svbool_t svcmpne[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<long> CompareNotEqualTo(Vector<long> left, Vector<long> right) => CompareNotEqualTo(left, right);
+
+        /// <summary>
+        /// svbool_t svcmpne[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CompareNotEqualTo(Vector<byte> left, Vector<byte> right) => CompareNotEqualTo(left, right);
+
+        /// <summary>
+        /// svbool_t svcmpne[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CompareNotEqualTo(Vector<ushort> left, Vector<ushort> right) => CompareNotEqualTo(left, right);
+
+        /// <summary>
+        /// svbool_t svcmpne[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CompareNotEqualTo(Vector<uint> left, Vector<uint> right) => CompareNotEqualTo(left, right);
+
+        /// <summary>
+        /// svbool_t svcmpne[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> CompareNotEqualTo(Vector<ulong> left, Vector<ulong> right) => CompareNotEqualTo(left, right);
+
+        /// <summary>
+        /// svbool_t svcmpne[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        /// </summary>
+        public static unsafe Vector<float> CompareNotEqualTo(Vector<float> left, Vector<float> right) => CompareNotEqualTo(left, right);
+
+        /// <summary>
+        /// svbool_t svcmpne[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        /// </summary>
+        public static unsafe Vector<double> CompareNotEqualTo(Vector<double> left, Vector<double> right) => CompareNotEqualTo(left, right);
+
+
+        /// CompareUnordered : Compare unordered with
+
+        /// <summary>
+        /// svbool_t svcmpuo[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        /// </summary>
+        public static unsafe Vector<float> CompareUnordered(Vector<float> left, Vector<float> right) => CompareUnordered(left, right);
+
+        /// <summary>
+        /// svbool_t svcmpuo[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        /// </summary>
+        public static unsafe Vector<double> CompareUnordered(Vector<double> left, Vector<double> right) => CompareUnordered(left, right);
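+
+        // --- Editor's note (sketch, not part of the generated API): CompareUnordered(x, x)
+        // is true exactly in the NaN lanes, so combining it with ConditionalSelect
+        // replaces NaNs with a caller-supplied fallback. `ReplaceNaNSketch` is a
+        // hypothetical helper name.
+        internal static Vector<double> ReplaceNaNSketch(Vector<double> x, Vector<double> fallback) =>
+            ConditionalSelect(CompareUnordered(x, x), fallback, x);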
+
+
+        /// Compute16BitAddresses : Compute vector addresses for 16-bit data
+
+        /// <summary>
+        /// svuint32_t svadrh[_u32base]_[s32]index(svuint32_t bases, svint32_t indices)
+        /// </summary>
+        public static unsafe Vector<uint> Compute16BitAddresses(Vector<uint> bases, Vector<int> indices) => Compute16BitAddresses(bases, indices);
+
+        /// <summary>
+        /// svuint32_t svadrh[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices)
+        /// </summary>
+        public static unsafe Vector<uint> Compute16BitAddresses(Vector<uint> bases, Vector<uint> indices) => Compute16BitAddresses(bases, indices);
+
+        /// <summary>
+        /// svuint64_t svadrh[_u64base]_[s64]index(svuint64_t bases, svint64_t indices)
+        /// </summary>
+        public static unsafe Vector<ulong> Compute16BitAddresses(Vector<ulong> bases, Vector<long> indices) => Compute16BitAddresses(bases, indices);
+
+        /// <summary>
+        /// svuint64_t svadrh[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices)
+        /// </summary>
+        public static unsafe Vector<ulong> Compute16BitAddresses(Vector<ulong> bases, Vector<ulong> indices) => Compute16BitAddresses(bases, indices);
+
+
+        /// Compute32BitAddresses : Compute vector addresses for 32-bit data
+
+        /// <summary>
+        /// svuint32_t svadrw[_u32base]_[s32]index(svuint32_t bases, svint32_t indices)
+        /// </summary>
+        public static unsafe Vector<uint> Compute32BitAddresses(Vector<uint> bases, Vector<int> indices) => Compute32BitAddresses(bases, indices);
+
+        /// <summary>
+        /// svuint32_t svadrw[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices)
+        /// </summary>
+        public static unsafe Vector<uint> Compute32BitAddresses(Vector<uint> bases, Vector<uint> indices) => Compute32BitAddresses(bases, indices);
+
+        /// <summary>
+        /// svuint64_t svadrw[_u64base]_[s64]index(svuint64_t bases, svint64_t indices)
+        /// </summary>
+        public static unsafe Vector<ulong> Compute32BitAddresses(Vector<ulong> bases, Vector<long> indices) => Compute32BitAddresses(bases, indices);
+
+        /// <summary>
+        /// svuint64_t svadrw[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices)
+        /// </summary>
+        public static unsafe Vector<ulong> Compute32BitAddresses(Vector<ulong> bases, Vector<ulong> indices) => Compute32BitAddresses(bases, indices);
+
+
+        /// Compute64BitAddresses : Compute vector addresses for 64-bit data
+
+        /// <summary>
+        /// svuint32_t svadrd[_u32base]_[s32]index(svuint32_t bases, svint32_t indices)
+        /// </summary>
+        public static unsafe Vector<uint> Compute64BitAddresses(Vector<uint> bases, Vector<int> indices) => Compute64BitAddresses(bases, indices);
+
+        /// <summary>
+        /// svuint32_t svadrd[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices)
+        /// </summary>
+        public static unsafe Vector<uint> Compute64BitAddresses(Vector<uint> bases, Vector<uint> indices) => Compute64BitAddresses(bases, indices);
+
+        /// <summary>
+        /// svuint64_t svadrd[_u64base]_[s64]index(svuint64_t bases, svint64_t indices)
+        /// </summary>
+        public static unsafe Vector<ulong> Compute64BitAddresses(Vector<ulong> bases, Vector<long> indices) => Compute64BitAddresses(bases, indices);
+
+        /// <summary>
+        /// svuint64_t svadrd[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices)
+        /// </summary>
+        public static unsafe Vector<ulong> Compute64BitAddresses(Vector<ulong> bases, Vector<ulong> indices) => Compute64BitAddresses(bases, indices);
+
+
+        /// Compute8BitAddresses : Compute vector addresses for 8-bit data
+
+        /// <summary>
+        /// svuint32_t svadrb[_u32base]_[s32]offset(svuint32_t bases, svint32_t offsets)
+        /// </summary>
+        public static unsafe Vector<uint> Compute8BitAddresses(Vector<uint> bases, Vector<int> indices) => Compute8BitAddresses(bases, indices);
+
+        /// <summary>
+        /// svuint32_t svadrb[_u32base]_[u32]offset(svuint32_t bases, svuint32_t offsets)
+        /// </summary>
+        public static unsafe Vector<uint> Compute8BitAddresses(Vector<uint> bases, Vector<uint> indices) => Compute8BitAddresses(bases, indices);
+
+        /// <summary>
+        /// svuint64_t svadrb[_u64base]_[s64]offset(svuint64_t bases, svint64_t offsets)
+        /// </summary>
+        public static unsafe Vector<ulong> Compute8BitAddresses(Vector<ulong> bases, Vector<long> indices) => Compute8BitAddresses(bases, indices);
+
+        /// <summary>
+        /// svuint64_t svadrb[_u64base]_[u64]offset(svuint64_t bases, svuint64_t indices)
+        /// </summary>
+        public static unsafe Vector<ulong> Compute8BitAddresses(Vector<ulong> bases, Vector<ulong> indices) => Compute8BitAddresses(bases, indices);
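+
+        // --- Editor's note (sketch, hypothetical helper): the ComputeNBitAddresses
+        // family scales each index by the element size and adds it to the base address
+        // lanes (svadrh computes bases + indices * 2), which is how gather/scatter
+        // address vectors are typically formed.
+        internal static Vector<ulong> HalfwordAddressesSketch(Vector<ulong> bases, Vector<long> indices) =>
+            Compute16BitAddresses(bases, indices);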
+
+
+        /// ConditionalExtractAfterLastActiveElement : Conditionally extract element after last
+
+        /// <summary>
+        /// svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+        /// </summary>
+        public static unsafe Vector<sbyte> ConditionalExtractAfterLastActiveElement(Vector<sbyte> mask, Vector<sbyte> defaultValue, Vector<sbyte> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+        /// int8_t svclasta[_n_s8](svbool_t pg, int8_t fallback, svint8_t data)
+        /// </summary>
+        public static unsafe sbyte ConditionalExtractAfterLastActiveElement(Vector<sbyte> mask, sbyte defaultValues, Vector<sbyte> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+        /// </summary>
+        public static unsafe Vector<short> ConditionalExtractAfterLastActiveElement(Vector<short> mask, Vector<short> defaultValue, Vector<short> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+        /// int16_t svclasta[_n_s16](svbool_t pg, int16_t fallback, svint16_t data)
+        /// </summary>
+        public static unsafe short ConditionalExtractAfterLastActiveElement(Vector<short> mask, short defaultValues, Vector<short> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+        /// </summary>
+        public static unsafe Vector<int> ConditionalExtractAfterLastActiveElement(Vector<int> mask, Vector<int> defaultValue, Vector<int> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+        /// int32_t svclasta[_n_s32](svbool_t pg, int32_t fallback, svint32_t data)
+        /// </summary>
+        public static unsafe int ConditionalExtractAfterLastActiveElement(Vector<int> mask, int defaultValues, Vector<int> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svint64_t svclasta[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+        /// </summary>
+        public static unsafe Vector<long> ConditionalExtractAfterLastActiveElement(Vector<long> mask, Vector<long> defaultValue, Vector<long> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svint64_t svclasta[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+        /// int64_t svclasta[_n_s64](svbool_t pg, int64_t fallback, svint64_t data)
+        /// </summary>
+        public static unsafe long ConditionalExtractAfterLastActiveElement(Vector<long> mask, long defaultValues, Vector<long> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+        /// </summary>
+        public static unsafe Vector<byte> ConditionalExtractAfterLastActiveElement(Vector<byte> mask, Vector<byte> defaultValue, Vector<byte> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+        /// uint8_t svclasta[_n_u8](svbool_t pg, uint8_t fallback, svuint8_t data)
+        /// </summary>
+        public static unsafe byte ConditionalExtractAfterLastActiveElement(Vector<byte> mask, byte defaultValues, Vector<byte> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svuint16_t svclasta[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+        /// </summary>
+        public static unsafe Vector<ushort> ConditionalExtractAfterLastActiveElement(Vector<ushort> mask, Vector<ushort> defaultValue, Vector<ushort> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svuint16_t svclasta[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+        /// uint16_t svclasta[_n_u16](svbool_t pg, uint16_t fallback, svuint16_t data)
+        /// </summary>
+        public static unsafe ushort ConditionalExtractAfterLastActiveElement(Vector<ushort> mask, ushort defaultValues, Vector<ushort> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svuint32_t svclasta[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+        /// </summary>
+        public static unsafe Vector<uint> ConditionalExtractAfterLastActiveElement(Vector<uint> mask, Vector<uint> defaultValue, Vector<uint> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svuint32_t svclasta[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+        /// uint32_t svclasta[_n_u32](svbool_t pg, uint32_t fallback, svuint32_t data)
+        /// </summary>
+        public static unsafe uint ConditionalExtractAfterLastActiveElement(Vector<uint> mask, uint defaultValues, Vector<uint> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svuint64_t svclasta[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+        /// </summary>
+        public static unsafe Vector<ulong> ConditionalExtractAfterLastActiveElement(Vector<ulong> mask, Vector<ulong> defaultValue, Vector<ulong> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svuint64_t svclasta[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+        /// uint64_t svclasta[_n_u64](svbool_t pg, uint64_t fallback, svuint64_t data)
+        /// </summary>
+        public static unsafe ulong ConditionalExtractAfterLastActiveElement(Vector<ulong> mask, ulong defaultValues, Vector<ulong> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svfloat32_t svclasta[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+        /// </summary>
+        public static unsafe Vector<float> ConditionalExtractAfterLastActiveElement(Vector<float> mask, Vector<float> defaultValue, Vector<float> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svfloat32_t svclasta[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+        /// float32_t svclasta[_n_f32](svbool_t pg, float32_t fallback, svfloat32_t data)
+        /// </summary>
+        public static unsafe float ConditionalExtractAfterLastActiveElement(Vector<float> mask, float defaultValues, Vector<float> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svfloat64_t svclasta[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+        /// </summary>
+        public static unsafe Vector<double> ConditionalExtractAfterLastActiveElement(Vector<double> mask, Vector<double> defaultValue, Vector<double> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svfloat64_t svclasta[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+        /// float64_t svclasta[_n_f64](svbool_t pg, float64_t fallback, svfloat64_t data)
+        /// </summary>
+        public static unsafe double ConditionalExtractAfterLastActiveElement(Vector<double> mask, double defaultValues, Vector<double> data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data);
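+
+        // --- Editor's note (sketch): svclasta returns the element *after* the last
+        // active one, or the fallback when no lane is active, which is useful for
+        // carrying the first not-yet-processed value out of a predicated loop.
+        // `NextUnprocessedSketch` is a hypothetical helper name.
+        internal static int NextUnprocessedSketch(Vector<int> mask, Vector<int> data) =>
+            ConditionalExtractAfterLastActiveElement(mask, -1, data);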
+
+
+        /// ConditionalExtractAfterLastActiveElementAndReplicate : Conditionally extract element after last
+
+        /// <summary>
+        /// svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+        /// </summary>
+        public static unsafe Vector<sbyte> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<sbyte> mask, Vector<sbyte> defaultScalar, Vector<sbyte> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+        /// <summary>
+        /// svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+        /// </summary>
+        public static unsafe Vector<short> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<short> mask, Vector<short> defaultScalar, Vector<short> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+        /// <summary>
+        /// svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+        /// </summary>
+        public static unsafe Vector<int> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<int> mask, Vector<int> defaultScalar, Vector<int> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+        /// <summary>
+        /// svint64_t svclasta[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+        /// </summary>
+        public static unsafe Vector<long> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<long> mask, Vector<long> defaultScalar, Vector<long> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+        /// <summary>
+        /// svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+        /// </summary>
+        public static unsafe Vector<byte> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<byte> mask, Vector<byte> defaultScalar, Vector<byte> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+        /// <summary>
+        /// svuint16_t svclasta[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+        /// </summary>
+        public static unsafe Vector<ushort> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<ushort> mask, Vector<ushort> defaultScalar, Vector<ushort> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+        /// <summary>
+        /// svuint32_t svclasta[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+        /// </summary>
+        public static unsafe Vector<uint> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<uint> mask, Vector<uint> defaultScalar, Vector<uint> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+        /// <summary>
+        /// svuint64_t svclasta[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+        /// </summary>
+        public static unsafe Vector<ulong> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<ulong> mask, Vector<ulong> defaultScalar, Vector<ulong> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+        /// <summary>
+        /// svfloat32_t svclasta[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+        /// </summary>
+        public static unsafe Vector<float> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<float> mask, Vector<float> defaultScalar, Vector<float> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+        /// <summary>
+        /// svfloat64_t svclasta[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+        /// </summary>
+        public static unsafe Vector<double> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<double> mask, Vector<double> defaultScalar, Vector<double> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+
+        /// ConditionalExtractLastActiveElement : Conditionally extract last element
+
+        /// <summary>
+        /// svint8_t svclastb[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+        /// </summary>
+        public static unsafe Vector<sbyte> ConditionalExtractLastActiveElement(Vector<sbyte> mask, Vector<sbyte> defaultValue, Vector<sbyte> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svint8_t svclastb[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+        /// int8_t svclastb[_n_s8](svbool_t pg, int8_t fallback, svint8_t data)
+        /// </summary>
+        public static unsafe sbyte ConditionalExtractLastActiveElement(Vector<sbyte> mask, sbyte defaultValues, Vector<sbyte> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svint16_t svclastb[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+        /// </summary>
+        public static unsafe Vector<short> ConditionalExtractLastActiveElement(Vector<short> mask, Vector<short> defaultValue, Vector<short> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svint16_t svclastb[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+        /// int16_t svclastb[_n_s16](svbool_t pg, int16_t fallback, svint16_t data)
+        /// </summary>
+        public static unsafe short ConditionalExtractLastActiveElement(Vector<short> mask, short defaultValues, Vector<short> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svint32_t svclastb[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+        /// </summary>
+        public static unsafe Vector<int> ConditionalExtractLastActiveElement(Vector<int> mask, Vector<int> defaultValue, Vector<int> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svint32_t svclastb[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+        /// int32_t svclastb[_n_s32](svbool_t pg, int32_t fallback, svint32_t data)
+        /// </summary>
+        public static unsafe int ConditionalExtractLastActiveElement(Vector<int> mask, int defaultValues, Vector<int> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svint64_t svclastb[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+        /// </summary>
+        public static unsafe Vector<long> ConditionalExtractLastActiveElement(Vector<long> mask, Vector<long> defaultValue, Vector<long> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svint64_t svclastb[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+        /// int64_t svclastb[_n_s64](svbool_t pg, int64_t fallback, svint64_t data)
+        /// </summary>
+        public static unsafe long ConditionalExtractLastActiveElement(Vector<long> mask, long defaultValues, Vector<long> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svuint8_t svclastb[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+        /// </summary>
+        public static unsafe Vector<byte> ConditionalExtractLastActiveElement(Vector<byte> mask, Vector<byte> defaultValue, Vector<byte> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svuint8_t svclastb[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+        /// uint8_t svclastb[_n_u8](svbool_t pg, uint8_t fallback, svuint8_t data)
+        /// </summary>
+        public static unsafe byte ConditionalExtractLastActiveElement(Vector<byte> mask, byte defaultValues, Vector<byte> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svuint16_t svclastb[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+        /// </summary>
+        public static unsafe Vector<ushort> ConditionalExtractLastActiveElement(Vector<ushort> mask, Vector<ushort> defaultValue, Vector<ushort> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svuint16_t svclastb[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+        /// uint16_t svclastb[_n_u16](svbool_t pg, uint16_t fallback, svuint16_t data)
+        /// </summary>
+        public static unsafe ushort ConditionalExtractLastActiveElement(Vector<ushort> mask, ushort defaultValues, Vector<ushort> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svuint32_t svclastb[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+        /// </summary>
+        public static unsafe Vector<uint> ConditionalExtractLastActiveElement(Vector<uint> mask, Vector<uint> defaultValue, Vector<uint> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svuint32_t svclastb[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+        /// uint32_t svclastb[_n_u32](svbool_t pg, uint32_t fallback, svuint32_t data)
+        /// </summary>
+        public static unsafe uint ConditionalExtractLastActiveElement(Vector<uint> mask, uint defaultValues, Vector<uint> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svuint64_t svclastb[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+        /// </summary>
+        public static unsafe Vector<ulong> ConditionalExtractLastActiveElement(Vector<ulong> mask, Vector<ulong> defaultValue, Vector<ulong> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svuint64_t svclastb[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+        /// uint64_t svclastb[_n_u64](svbool_t pg, uint64_t fallback, svuint64_t data)
+        /// </summary>
+        public static unsafe ulong ConditionalExtractLastActiveElement(Vector<ulong> mask, ulong defaultValues, Vector<ulong> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svfloat32_t svclastb[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+        /// </summary>
+        public static unsafe Vector<float> ConditionalExtractLastActiveElement(Vector<float> mask, Vector<float> defaultValue, Vector<float> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svfloat32_t svclastb[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+        /// float32_t svclastb[_n_f32](svbool_t pg, float32_t fallback, svfloat32_t data)
+        /// </summary>
+        public static unsafe float ConditionalExtractLastActiveElement(Vector<float> mask, float defaultValues, Vector<float> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+        /// <summary>
+        /// svfloat64_t svclastb[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+        /// </summary>
+        public static unsafe Vector<double> ConditionalExtractLastActiveElement(Vector<double> mask, Vector<double> defaultValue, Vector<double> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+        /// <summary>
+        /// svfloat64_t svclastb[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+        /// float64_t svclastb[_n_f64](svbool_t pg, float64_t fallback, svfloat64_t data)
+        /// </summary>
+        public static unsafe double ConditionalExtractLastActiveElement(Vector<double> mask, double defaultValues, Vector<double> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
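+
+        // --- Editor's note (sketch): svclastb extracts the last active element itself;
+        // paired with a while-mask this yields the final element of a strip-mined loop.
+        // `LastActiveSketch` is a hypothetical helper name.
+        internal static double LastActiveSketch(Vector<double> mask, Vector<double> data) =>
+            ConditionalExtractLastActiveElement(mask, 0.0, data);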
+
+
+        /// ConditionalExtractLastActiveElementAndReplicate : Conditionally extract last element
+
+        /// <summary>
+        /// svint8_t svclastb[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+        /// </summary>
+        public static unsafe Vector<sbyte> ConditionalExtractLastActiveElementAndReplicate(Vector<sbyte> mask, Vector<sbyte> fallback, Vector<sbyte> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+        /// <summary>
+        /// svint16_t svclastb[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+        /// </summary>
+        public static unsafe Vector<short> ConditionalExtractLastActiveElementAndReplicate(Vector<short> mask, Vector<short> fallback, Vector<short> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+        /// <summary>
+        /// svint32_t svclastb[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+        /// </summary>
+        public static unsafe Vector<int> ConditionalExtractLastActiveElementAndReplicate(Vector<int> mask, Vector<int> fallback, Vector<int> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+        /// <summary>
+        /// svint64_t svclastb[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+        /// </summary>
+        public static unsafe Vector<long> ConditionalExtractLastActiveElementAndReplicate(Vector<long> mask, Vector<long> fallback, Vector<long> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+        /// <summary>
+        /// svuint8_t svclastb[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+        /// </summary>
+        public static unsafe Vector<byte> ConditionalExtractLastActiveElementAndReplicate(Vector<byte> mask, Vector<byte> fallback, Vector<byte> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+        /// <summary>
+        /// svuint16_t svclastb[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+        /// </summary>
+        public static unsafe Vector<ushort> ConditionalExtractLastActiveElementAndReplicate(Vector<ushort> mask, Vector<ushort> fallback, Vector<ushort> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+        /// <summary>
+        /// svuint32_t svclastb[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+        /// </summary>
+        public static unsafe Vector<uint> ConditionalExtractLastActiveElementAndReplicate(Vector<uint> mask, Vector<uint> fallback, Vector<uint> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+        /// <summary>
+        /// svuint64_t svclastb[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+        /// </summary>
+        public static unsafe Vector<ulong> ConditionalExtractLastActiveElementAndReplicate(Vector<ulong> mask, Vector<ulong> fallback, Vector<ulong> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+        /// <summary>
+        /// svfloat32_t svclastb[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+        /// </summary>
+        public static unsafe Vector<float> ConditionalExtractLastActiveElementAndReplicate(Vector<float> mask, Vector<float> fallback, Vector<float> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+        /// <summary>
+        /// svfloat64_t svclastb[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+        /// </summary>
+        public static unsafe Vector<double> ConditionalExtractLastActiveElementAndReplicate(Vector<double> mask, Vector<double> fallback, Vector<double> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+
+        /// ConditionalSelect : Conditionally select elements
+
+        /// <summary>
+        /// svint8_t svsel[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+        /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> ConditionalSelect(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right) => ConditionalSelect(mask, left, right);
+
+        /// <summary>
+        /// svint16_t svsel[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+        /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<short> ConditionalSelect(Vector<short> mask, Vector<short> left, Vector<short> right) => ConditionalSelect(mask, left, right);
+
+        /// <summary>
+        /// svint32_t svsel[_s32](svbool_t pg, svint32_t op1, svint32_t op2)
+        /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<int> ConditionalSelect(Vector<int> mask, Vector<int> left, Vector<int> right) => ConditionalSelect(mask, left, right);
+
+        /// <summary>
+        /// svint64_t svsel[_s64](svbool_t pg, svint64_t op1, svint64_t op2)
+        /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<long> ConditionalSelect(Vector<long> mask, Vector<long> left, Vector<long> right) => ConditionalSelect(mask, left, right);
+
+        /// <summary>
+        /// svuint8_t svsel[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+        /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> ConditionalSelect(Vector<byte> mask, Vector<byte> left, Vector<byte> right) => ConditionalSelect(mask, left, right);
+
+        /// <summary>
+        /// svuint16_t svsel[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+        /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> ConditionalSelect(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right) => ConditionalSelect(mask, left, right);
+
+        /// <summary>
+        /// svuint32_t svsel[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2)
+        /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> ConditionalSelect(Vector<uint> mask, Vector<uint> left, Vector<uint> right) => ConditionalSelect(mask, left, right);
+
+        /// <summary>
+        /// svuint64_t svsel[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2)
+        /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> ConditionalSelect(Vector<ulong> mask, Vector<ulong> left, Vector<ulong> right) => ConditionalSelect(mask, left, right);
+
+        /// <summary>
+        /// svfloat32_t svsel[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        /// </summary>
+        public static unsafe Vector<float> ConditionalSelect(Vector<float> mask, Vector<float> left, Vector<float> right) => ConditionalSelect(mask, left, right);
+
+        /// <summary>
+        /// svfloat64_t svsel[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        /// </summary>
+        public static unsafe Vector<double> ConditionalSelect(Vector<double> mask, Vector<double> left, Vector<double> right) => ConditionalSelect(mask, left, right);
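+
+        // --- Editor's note (sketch): ConditionalSelect is the building block the JIT can
+        // use for the merging (_m) and zeroing (_z) predication forms listed in the
+        // comments above; e.g. a merge-predicated divide keeps the left operand in the
+        // inactive lanes. Illustrative only: unlike a true predicated divide, the
+        // emulated right-hand side still executes in every lane.
+        internal static Vector<uint> DivideMergingSketch(Vector<uint> mask, Vector<uint> left, Vector<uint> right) =>
+            ConditionalSelect(mask, Divide(left, right), left);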
+
+
+        /// ConvertToDouble : Floating-point convert
+
+        /// <summary>
+        /// svfloat64_t svcvt_f64[_s32]_m(svfloat64_t inactive, svbool_t pg, svint32_t op)
+        /// svfloat64_t svcvt_f64[_s32]_x(svbool_t pg, svint32_t op)
+        /// svfloat64_t svcvt_f64[_s32]_z(svbool_t pg, svint32_t op)
+        /// </summary>
+        public static unsafe Vector<double> ConvertToDouble(Vector<int> value) => ConvertToDouble(value);
+
+        /// <summary>
+        /// svfloat64_t svcvt_f64[_s64]_m(svfloat64_t inactive, svbool_t pg, svint64_t op)
+        /// svfloat64_t svcvt_f64[_s64]_x(svbool_t pg, svint64_t op)
+        /// svfloat64_t svcvt_f64[_s64]_z(svbool_t pg, svint64_t op)
+        /// </summary>
+        public static unsafe Vector<double> ConvertToDouble(Vector<long> value) => ConvertToDouble(value);
+
+        /// <summary>
+        /// svfloat64_t svcvt_f64[_u32]_m(svfloat64_t inactive, svbool_t pg, svuint32_t op)
+        /// svfloat64_t svcvt_f64[_u32]_x(svbool_t pg, svuint32_t op)
+        /// svfloat64_t svcvt_f64[_u32]_z(svbool_t pg, svuint32_t op)
+        /// </summary>
+        public static unsafe Vector<double> ConvertToDouble(Vector<uint> value) => ConvertToDouble(value);
+
+        /// <summary>
+        /// svfloat64_t svcvt_f64[_u64]_m(svfloat64_t inactive, svbool_t pg, svuint64_t op)
+        /// svfloat64_t svcvt_f64[_u64]_x(svbool_t pg, svuint64_t op)
+        /// svfloat64_t svcvt_f64[_u64]_z(svbool_t pg, svuint64_t op)
+        /// </summary>
+        public static unsafe Vector<double> ConvertToDouble(Vector<ulong> value) => ConvertToDouble(value);
+
+        /// <summary>
+        /// svfloat64_t svcvt_f64[_f32]_m(svfloat64_t inactive, svbool_t pg, svfloat32_t op)
+        /// svfloat64_t svcvt_f64[_f32]_x(svbool_t pg, svfloat32_t op)
+        /// svfloat64_t svcvt_f64[_f32]_z(svbool_t pg, svfloat32_t op)
+        /// </summary>
+        public static unsafe Vector<double> ConvertToDouble(Vector<float> value) => ConvertToDouble(value);
+
+
+        /// ConvertToInt32 : Floating-point convert
+
+        /// <summary>
+        /// svint32_t svcvt_s32[_f32]_m(svint32_t inactive, svbool_t pg, svfloat32_t op)
+        /// svint32_t svcvt_s32[_f32]_x(svbool_t pg, svfloat32_t op)
+        /// svint32_t svcvt_s32[_f32]_z(svbool_t pg, svfloat32_t op)
+        /// </summary>
+        public static unsafe Vector<int> ConvertToInt32(Vector<float> value) => ConvertToInt32(value);
+
+        /// <summary>
+        /// svint32_t svcvt_s32[_f64]_m(svint32_t inactive, svbool_t pg, svfloat64_t op)
+        /// svint32_t svcvt_s32[_f64]_x(svbool_t pg, svfloat64_t op)
+        /// svint32_t svcvt_s32[_f64]_z(svbool_t pg, svfloat64_t op)
+        /// </summary>
+        public static unsafe Vector<int> ConvertToInt32(Vector<double> value) => ConvertToInt32(value);
+
+
+        /// ConvertToInt64 : Floating-point convert
+
+        /// <summary>
+        /// svint64_t svcvt_s64[_f32]_m(svint64_t inactive, svbool_t pg, svfloat32_t op)
+        /// svint64_t svcvt_s64[_f32]_x(svbool_t pg, svfloat32_t op)
+        /// svint64_t svcvt_s64[_f32]_z(svbool_t pg, svfloat32_t op)
+        /// </summary>
+        public static unsafe Vector<long> ConvertToInt64(Vector<float> value) => ConvertToInt64(value);
+
+        /// <summary>
+        /// svint64_t svcvt_s64[_f64]_m(svint64_t inactive, svbool_t pg, svfloat64_t op)
+        /// svint64_t svcvt_s64[_f64]_x(svbool_t pg, svfloat64_t op)
+        /// svint64_t svcvt_s64[_f64]_z(svbool_t pg, svfloat64_t op)
+        /// </summary>
+        public static unsafe Vector<long> ConvertToInt64(Vector<double> value) => ConvertToInt64(value);
+
+
+        /// ConvertToSingle : Floating-point convert
+
+        /// <summary>
+        /// svfloat32_t svcvt_f32[_s32]_m(svfloat32_t inactive, svbool_t pg, svint32_t op)
+        /// svfloat32_t svcvt_f32[_s32]_x(svbool_t pg, svint32_t op)
+        /// svfloat32_t svcvt_f32[_s32]_z(svbool_t pg, svint32_t op)
+        /// </summary>
+        public static unsafe Vector<float> ConvertToSingle(Vector<int> value) => ConvertToSingle(value);
+
+        /// <summary>
+        /// svfloat32_t svcvt_f32[_s64]_m(svfloat32_t inactive, svbool_t pg, svint64_t op)
+        /// svfloat32_t svcvt_f32[_s64]_x(svbool_t pg, svint64_t op)
+        /// svfloat32_t svcvt_f32[_s64]_z(svbool_t pg, svint64_t op)
+        /// </summary>
+        public static unsafe Vector<float> ConvertToSingle(Vector<long> value) => ConvertToSingle(value);
+
+        /// <summary>
+        /// svfloat32_t svcvt_f32[_u32]_m(svfloat32_t inactive, svbool_t pg, svuint32_t op)
+        /// svfloat32_t svcvt_f32[_u32]_x(svbool_t pg, svuint32_t op)
+        /// svfloat32_t svcvt_f32[_u32]_z(svbool_t pg, svuint32_t op)
+        /// </summary>
+        public static unsafe Vector<float> ConvertToSingle(Vector<uint> value) => ConvertToSingle(value);
+
+        /// <summary>
+        /// svfloat32_t svcvt_f32[_u64]_m(svfloat32_t inactive, svbool_t pg, svuint64_t op)
+        /// svfloat32_t svcvt_f32[_u64]_x(svbool_t pg, svuint64_t op)
+        /// svfloat32_t svcvt_f32[_u64]_z(svbool_t pg, svuint64_t op)
+        /// </summary>
+        public static unsafe Vector<float> ConvertToSingle(Vector<ulong> value) => ConvertToSingle(value);
+
+        /// <summary>
+        /// svfloat32_t svcvt_f32[_f64]_m(svfloat32_t inactive, svbool_t pg, svfloat64_t op)
+        /// svfloat32_t svcvt_f32[_f64]_x(svbool_t pg, svfloat64_t op)
+        /// svfloat32_t svcvt_f32[_f64]_z(svbool_t pg, svfloat64_t op)
+        /// </summary>
+        public static unsafe Vector<float> ConvertToSingle(Vector<double> value) => ConvertToSingle(value);
+
+
+        /// ConvertToUInt32 : Floating-point convert
+
+        /// <summary>
+        /// svuint32_t svcvt_u32[_f32]_m(svuint32_t inactive, svbool_t pg, svfloat32_t op)
+        /// svuint32_t svcvt_u32[_f32]_x(svbool_t pg, svfloat32_t op)
+        /// svuint32_t svcvt_u32[_f32]_z(svbool_t pg, svfloat32_t op)
+        /// </summary>
+        public static unsafe Vector<uint> ConvertToUInt32(Vector<float> value) => ConvertToUInt32(value);
+
+        /// <summary>
+        /// svuint32_t svcvt_u32[_f64]_m(svuint32_t inactive, svbool_t pg, svfloat64_t op)
+        /// svuint32_t svcvt_u32[_f64]_x(svbool_t pg, svfloat64_t op)
+        /// svuint32_t svcvt_u32[_f64]_z(svbool_t pg, svfloat64_t op)
+        /// </summary>
+        public static unsafe Vector<uint> ConvertToUInt32(Vector<double> value) => ConvertToUInt32(value);
+
+
+        /// ConvertToUInt64 : Floating-point convert
+
+        /// <summary>
+        /// svuint64_t svcvt_u64[_f32]_m(svuint64_t inactive, svbool_t pg, svfloat32_t op)
+        /// svuint64_t svcvt_u64[_f32]_x(svbool_t pg, svfloat32_t op)
+        /// svuint64_t svcvt_u64[_f32]_z(svbool_t pg, svfloat32_t op)
+        /// </summary>
+        public static unsafe Vector<ulong> ConvertToUInt64(Vector<float> value) => ConvertToUInt64(value);
+
+        /// <summary>
+        /// svuint64_t svcvt_u64[_f64]_m(svuint64_t inactive, svbool_t pg, svfloat64_t op)
+        /// svuint64_t svcvt_u64[_f64]_x(svbool_t pg, svfloat64_t op)
+        /// svuint64_t svcvt_u64[_f64]_z(svbool_t pg, svfloat64_t op)
+        /// </summary>
+        public static unsafe Vector<ulong> ConvertToUInt64(Vector<double> value) => ConvertToUInt64(value);
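+
+        // --- Editor's note (sketch, hypothetical helper): these conversions mirror
+        // svcvt, which rounds toward zero when converting to integer lanes, so a
+        // float -> Int32 -> float round trip truncates the fractional part per lane.
+        internal static Vector<float> TruncateSketch(Vector<float> value) =>
+            ConvertToSingle(ConvertToInt32(value));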
+
+
+        /// Count16BitElements : Count the number of 16-bit elements in a vector
+
+        /// <summary>
+        /// uint64_t svcnth_pat(enum svpattern pattern)
+        /// </summary>
+        public static unsafe ulong Count16BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => Count16BitElements(pattern);
+
+
+        /// Count32BitElements : Count the number of 32-bit elements in a vector
+
+        /// <summary>
+        /// uint64_t svcntw_pat(enum svpattern pattern)
+        /// </summary>
+        public static unsafe ulong Count32BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => Count32BitElements(pattern);
+
+
+        /// Count64BitElements : Count the number of 64-bit elements in a vector
+
+        /// <summary>
+        /// uint64_t svcntd_pat(enum svpattern pattern)
+        /// </summary>
+        public static unsafe ulong Count64BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => Count64BitElements(pattern);
+
+
+        /// Count8BitElements : Count the number of 8-bit elements in a vector
+
+        /// <summary>
+        /// uint64_t svcntb_pat(enum svpattern pattern)
+        /// </summary>
+        public static unsafe ulong Count8BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => Count8BitElements(pattern);
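+
+        // --- Editor's note (sketch): with the default SveMaskPattern.All, the Count
+        // helpers report the hardware vector length in elements; e.g. 8 int lanes on a
+        // 256-bit SVE implementation. `Int32LanesSketch` is a hypothetical helper name.
+        internal static ulong Int32LanesSketch() => Count32BitElements();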
+
+
+        /// CreateBreakAfterMask : Break after first true condition
+
+        /// <summary>
+        /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<sbyte> CreateBreakAfterMask(Vector<sbyte> totalMask, Vector<sbyte> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<short> CreateBreakAfterMask(Vector<short> totalMask, Vector<short> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<int> CreateBreakAfterMask(Vector<int> totalMask, Vector<int> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<long> CreateBreakAfterMask(Vector<long> totalMask, Vector<long> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<byte> CreateBreakAfterMask(Vector<byte> totalMask, Vector<byte> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateBreakAfterMask(Vector<ushort> totalMask, Vector<ushort> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<uint> CreateBreakAfterMask(Vector<uint> totalMask, Vector<uint> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateBreakAfterMask(Vector<ulong> totalMask, Vector<ulong> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+
+        /// CreateBreakAfterPropagateMask : Break after first true condition, propagating from previous partition
+
+        /// <summary>
+        /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> CreateBreakAfterPropagateMask(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+        /// <summary>
+        /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<short> CreateBreakAfterPropagateMask(Vector<short> mask, Vector<short> left, Vector<short> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+        /// <summary>
+        /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<int> CreateBreakAfterPropagateMask(Vector<int> mask, Vector<int> left, Vector<int> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+        /// <summary>
+        /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<long> CreateBreakAfterPropagateMask(Vector<long> mask, Vector<long> left, Vector<long> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+        /// <summary>
+        /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CreateBreakAfterPropagateMask(Vector<byte> mask, Vector<byte> left, Vector<byte> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+        /// <summary>
+        /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateBreakAfterPropagateMask(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+        /// <summary>
+        /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateBreakAfterPropagateMask(Vector<uint> mask, Vector<uint> left, Vector<uint> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+        /// <summary>
+        /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateBreakAfterPropagateMask(Vector<ulong> mask, Vector<ulong> left, Vector<ulong> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+
+        /// CreateBreakBeforeMask : Break before first true condition
+
+        /// <summary>
+        /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<sbyte> CreateBreakBeforeMask(Vector<sbyte> totalMask, Vector<sbyte> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<short> CreateBreakBeforeMask(Vector<short> totalMask, Vector<short> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<int> CreateBreakBeforeMask(Vector<int> totalMask, Vector<int> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<long> CreateBreakBeforeMask(Vector<long> totalMask, Vector<long> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<byte> CreateBreakBeforeMask(Vector<byte> totalMask, Vector<byte> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateBreakBeforeMask(Vector<ushort> totalMask, Vector<ushort> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<uint> CreateBreakBeforeMask(Vector<uint> totalMask, Vector<uint> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+        /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateBreakBeforeMask(Vector<ulong> totalMask, Vector<ulong> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+
+        /// CreateBreakBeforePropagateMask : Break before first true condition, propagating from previous partition
+
+        /// <summary>
+        /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> CreateBreakBeforePropagateMask(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+        /// <summary>
+        /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<short> CreateBreakBeforePropagateMask(Vector<short> mask, Vector<short> left, Vector<short> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+        /// <summary>
+        /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<int> CreateBreakBeforePropagateMask(Vector<int> mask, Vector<int> left, Vector<int> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+        /// <summary>
+        /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<long> CreateBreakBeforePropagateMask(Vector<long> mask, Vector<long> left, Vector<long> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+        /// <summary>
+        /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CreateBreakBeforePropagateMask(Vector<byte> mask, Vector<byte> left, Vector<byte> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+        /// <summary>
+        /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateBreakBeforePropagateMask(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+        /// <summary>
+        /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateBreakBeforePropagateMask(Vector<uint> mask, Vector<uint> left, Vector<uint> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+        /// <summary>
+        /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateBreakBeforePropagateMask(Vector<ulong> mask, Vector<ulong> left, Vector<ulong> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+
+        /// CreateBreakPropagateMask : Propagate break to next partition
+
+        /// <summary>
+        /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> CreateBreakPropagateMask(Vector<sbyte> totalMask, Vector<sbyte> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<short> CreateBreakPropagateMask(Vector<short> totalMask, Vector<short> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<int> CreateBreakPropagateMask(Vector<int> totalMask, Vector<int> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<long> CreateBreakPropagateMask(Vector<long> totalMask, Vector<long> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> CreateBreakPropagateMask(Vector<byte> totalMask, Vector<byte> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateBreakPropagateMask(Vector<ushort> totalMask, Vector<ushort> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateBreakPropagateMask(Vector<uint> totalMask, Vector<uint> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateBreakPropagateMask(Vector<ulong> totalMask, Vector<ulong> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+
+        /// CreateFalseMaskByte : Set all predicate elements to false
+
+        /// <summary>
+        /// svbool_t svpfalse[_b]()
+        /// </summary>
+        public static unsafe Vector<byte> CreateFalseMaskByte() => CreateFalseMaskByte();
+
+
+        /// CreateFalseMaskDouble : Set all predicate elements to false
+
+        /// <summary>
+        /// svbool_t svpfalse[_b]()
+        /// </summary>
+        public static unsafe Vector<double> CreateFalseMaskDouble() => CreateFalseMaskDouble();
+
+
+        /// CreateFalseMaskInt16 : Set all predicate elements to false
+
+        /// <summary>
+        /// svbool_t svpfalse[_b]()
+        /// </summary>
+        public static unsafe Vector<short> CreateFalseMaskInt16() => CreateFalseMaskInt16();
+
+
+        /// CreateFalseMaskInt32 : Set all predicate elements to false
+
+        /// <summary>
+        /// svbool_t svpfalse[_b]()
+        /// </summary>
+        public static unsafe Vector<int> CreateFalseMaskInt32() => CreateFalseMaskInt32();
+
+
+        /// CreateFalseMaskInt64 : Set all predicate elements to false
+
+        /// <summary>
+        /// svbool_t svpfalse[_b]()
+        /// </summary>
+        public static unsafe Vector<long> CreateFalseMaskInt64() => CreateFalseMaskInt64();
+
+
+        /// CreateFalseMaskSByte : Set all predicate elements to false
+
+        /// <summary>
+        /// svbool_t svpfalse[_b]()
+        /// </summary>
+        public static unsafe Vector<sbyte> CreateFalseMaskSByte() => CreateFalseMaskSByte();
+
+
+        /// CreateFalseMaskSingle : Set all predicate elements to false
+
+        /// <summary>
+        /// svbool_t svpfalse[_b]()
+        /// </summary>
+        public static unsafe Vector<float> CreateFalseMaskSingle() => CreateFalseMaskSingle();
+
+
+        /// CreateFalseMaskUInt16 : Set all predicate elements to false
+
+        /// <summary>
+        /// svbool_t svpfalse[_b]()
+        /// </summary>
+        public static unsafe Vector<ushort> CreateFalseMaskUInt16() => CreateFalseMaskUInt16();
+
+
+        /// CreateFalseMaskUInt32 : Set all predicate elements to false
+
+        /// <summary>
+        /// svbool_t svpfalse[_b]()
+        /// </summary>
+        public static unsafe Vector<uint> CreateFalseMaskUInt32() => CreateFalseMaskUInt32();
+
+
+        /// CreateFalseMaskUInt64 : Set all predicate elements to false
+
+        /// <summary>
+        /// svbool_t svpfalse[_b]()
+        /// </summary>
+        public static unsafe Vector<ulong> CreateFalseMaskUInt64() => CreateFalseMaskUInt64();
+
+
+        /// CreateMaskForFirstActiveElement : Set the first active predicate element to true
+
+        /// <summary>
+        /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<sbyte> CreateMaskForFirstActiveElement(Vector<sbyte> totalMask, Vector<sbyte> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<short> CreateMaskForFirstActiveElement(Vector<short> totalMask, Vector<short> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<int> CreateMaskForFirstActiveElement(Vector<int> totalMask, Vector<int> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<long> CreateMaskForFirstActiveElement(Vector<long> totalMask, Vector<long> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<byte> CreateMaskForFirstActiveElement(Vector<byte> totalMask, Vector<byte> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateMaskForFirstActiveElement(Vector<ushort> totalMask, Vector<ushort> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<uint> CreateMaskForFirstActiveElement(Vector<uint> totalMask, Vector<uint> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateMaskForFirstActiveElement(Vector<ulong> totalMask, Vector<ulong> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+
+        /// CreateMaskForNextActiveElement : Find next active predicate
+
+        /// <summary>
+        /// svbool_t svpnext_b8(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<byte> CreateMaskForNextActiveElement(Vector<byte> totalMask, Vector<byte> fromMask) => CreateMaskForNextActiveElement(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svpnext_b16(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateMaskForNextActiveElement(Vector<ushort> totalMask, Vector<ushort> fromMask) => CreateMaskForNextActiveElement(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svpnext_b32(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<uint> CreateMaskForNextActiveElement(Vector<uint> totalMask, Vector<uint> fromMask) => CreateMaskForNextActiveElement(totalMask, fromMask);
+
+        /// <summary>
+        /// svbool_t svpnext_b64(svbool_t pg, svbool_t op)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateMaskForNextActiveElement(Vector<ulong> totalMask, Vector<ulong> fromMask) => CreateMaskForNextActiveElement(totalMask, fromMask);
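+
+        // --- Editor's note (sketch): svpfirst/svpnext enumerate active lanes one at a
+        // time. Starting from an all-false mask isolates the first active lane of
+        // `totalMask`; feeding the result back through CreateMaskForNextActiveElement
+        // advances to the next one. `FirstActiveSketch` is a hypothetical helper name.
+        internal static Vector<uint> FirstActiveSketch(Vector<uint> totalMask) =>
+            CreateMaskForFirstActiveElement(totalMask, CreateFalseMaskUInt32());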
+
+
+        /// CreateTrueMaskByte : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<byte> CreateTrueMaskByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskByte(pattern);
+
+
+        /// CreateTrueMaskDouble : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<double> CreateTrueMaskDouble([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskDouble(pattern);
+
+
+        /// CreateTrueMaskInt16 : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<short> CreateTrueMaskInt16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskInt16(pattern);
+
+
+        /// CreateTrueMaskInt32 : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<int> CreateTrueMaskInt32([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskInt32(pattern);
+
+
+        /// CreateTrueMaskInt64 : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<long> CreateTrueMaskInt64([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskInt64(pattern);
+
+
+        /// CreateTrueMaskSByte : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<sbyte> CreateTrueMaskSByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskSByte(pattern);
+
+
+        /// CreateTrueMaskSingle : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<float> CreateTrueMaskSingle([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskSingle(pattern);
+
+
+        /// CreateTrueMaskUInt16 : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b16(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateTrueMaskUInt16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskUInt16(pattern);
+
+
+        /// CreateTrueMaskUInt32 : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b32(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<uint> CreateTrueMaskUInt32([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskUInt32(pattern);
+
+
+        /// CreateTrueMaskUInt64 : Set predicate elements to true
+
+        /// <summary>
+        /// svbool_t svptrue_pat_b64(enum svpattern pattern)
+        /// </summary>
+        public static unsafe Vector<ulong> CreateTrueMaskUInt64([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskUInt64(pattern);
+
+
+        /// CreateWhileLessThanMask16Bit : While incrementing scalar is less than
+
+        /// <summary>
+        /// svbool_t svwhilelt_b16[_s32](int32_t op1, int32_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateWhileLessThanMask16Bit(int left, int right) => CreateWhileLessThanMask16Bit(left, right);
+
+        /// <summary>
+        /// svbool_t svwhilelt_b16[_s64](int64_t op1, int64_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateWhileLessThanMask16Bit(long left, long right) => CreateWhileLessThanMask16Bit(left, right);
+
+        /// <summary>
+        /// svbool_t svwhilelt_b16[_u32](uint32_t op1, uint32_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateWhileLessThanMask16Bit(uint left, uint right) => CreateWhileLessThanMask16Bit(left, right);
+
+        /// <summary>
+        /// svbool_t svwhilelt_b16[_u64](uint64_t op1, uint64_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> CreateWhileLessThanMask16Bit(ulong left, ulong right) => CreateWhileLessThanMask16Bit(left, right);
+
+
+        /// CreateWhileLessThanMask32Bit : While incrementing scalar is less than
+
+        /// <summary>
+        /// svbool_t svwhilelt_b32[_s32](int32_t op1, int32_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateWhileLessThanMask32Bit(int left, int right) => CreateWhileLessThanMask32Bit(left, right);
+
+        /// <summary>
+        /// svbool_t svwhilelt_b32[_s64](int64_t op1, int64_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateWhileLessThanMask32Bit(long left, long right) => CreateWhileLessThanMask32Bit(left, right);
+
+        /// <summary>
+        /// svbool_t svwhilelt_b32[_u32](uint32_t op1, uint32_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateWhileLessThanMask32Bit(uint left, uint right) => CreateWhileLessThanMask32Bit(left, right);
+
+        /// <summary>
+        /// svbool_t svwhilelt_b32[_u64](uint64_t op1, uint64_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> CreateWhileLessThanMask32Bit(ulong left, ulong right) => CreateWhileLessThanMask32Bit(left, right);
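+
+        // --- Editor's note (sketch): the classic SVE strip-mined loop, driven by
+        // CreateWhileLessThanMask32Bit so the final partial vector needs no scalar
+        // epilogue. `ForEachLaneSketch` and its `body` callback are hypothetical and
+        // assume `using System;` at the top of the file.
+        internal static void ForEachLaneSketch(int count, Action<Vector<uint>> body)
+        {
+            int step = (int)Count32BitElements();
+            for (int i = 0; i < count; i += step)
+            {
+                // Lanes [i, count) stay active; the rest are masked off.
+                body(CreateWhileLessThanMask32Bit(i, count));
+            }
+        }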
/// svbool_t svwhilelt_b64[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileLessThanMask64Bit(int left, int right) => CreateWhileLessThanMask64Bit(left, right); + + /// + /// svbool_t svwhilelt_b64[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileLessThanMask64Bit(long left, long right) => CreateWhileLessThanMask64Bit(left, right); + + /// + /// svbool_t svwhilelt_b64[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileLessThanMask64Bit(uint left, uint right) => CreateWhileLessThanMask64Bit(left, right); + + /// + /// svbool_t svwhilelt_b64[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileLessThanMask64Bit(ulong left, ulong right) => CreateWhileLessThanMask64Bit(left, right); + + + /// CreateWhileLessThanMask8Bit : While incrementing scalar is less than + + /// + /// svbool_t svwhilelt_b8[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileLessThanMask8Bit(int left, int right) => CreateWhileLessThanMask8Bit(left, right); + + /// + /// svbool_t svwhilelt_b8[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileLessThanMask8Bit(long left, long right) => CreateWhileLessThanMask8Bit(left, right); + + /// + /// svbool_t svwhilelt_b8[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileLessThanMask8Bit(uint left, uint right) => CreateWhileLessThanMask8Bit(left, right); + + /// + /// svbool_t svwhilelt_b8[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileLessThanMask8Bit(ulong left, ulong right) => CreateWhileLessThanMask8Bit(left, right); + + + /// CreateWhileLessThanOrEqualMask16Bit : While incrementing scalar is less than or equal to + + /// + /// svbool_t svwhilele_b16[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask16Bit(int left, int right) => CreateWhileLessThanOrEqualMask16Bit(left, right); + + /// + /// svbool_t svwhilele_b16[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask16Bit(long left, long right) => CreateWhileLessThanOrEqualMask16Bit(left, right); + + /// + /// svbool_t svwhilele_b16[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask16Bit(uint left, uint right) => CreateWhileLessThanOrEqualMask16Bit(left, right); + + /// + /// svbool_t svwhilele_b16[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask16Bit(ulong left, ulong right) => CreateWhileLessThanOrEqualMask16Bit(left, right); + + + /// CreateWhileLessThanOrEqualMask32Bit : While incrementing scalar is less than or equal to + + /// + /// svbool_t svwhilele_b32[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask32Bit(int left, int right) => CreateWhileLessThanOrEqualMask32Bit(left, right); + + /// + /// svbool_t svwhilele_b32[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask32Bit(long left, long right) => CreateWhileLessThanOrEqualMask32Bit(left, right); + + /// + /// svbool_t svwhilele_b32[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask32Bit(uint left, uint right) => CreateWhileLessThanOrEqualMask32Bit(left, right); + + /// + /// svbool_t svwhilele_b32[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector 
CreateWhileLessThanOrEqualMask32Bit(ulong left, ulong right) => CreateWhileLessThanOrEqualMask32Bit(left, right); + + + /// CreateWhileLessThanOrEqualMask64Bit : While incrementing scalar is less than or equal to + + /// + /// svbool_t svwhilele_b64[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask64Bit(int left, int right) => CreateWhileLessThanOrEqualMask64Bit(left, right); + + /// + /// svbool_t svwhilele_b64[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask64Bit(long left, long right) => CreateWhileLessThanOrEqualMask64Bit(left, right); + + /// + /// svbool_t svwhilele_b64[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask64Bit(uint left, uint right) => CreateWhileLessThanOrEqualMask64Bit(left, right); + + /// + /// svbool_t svwhilele_b64[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask64Bit(ulong left, ulong right) => CreateWhileLessThanOrEqualMask64Bit(left, right); + + + /// CreateWhileLessThanOrEqualMask8Bit : While incrementing scalar is less than or equal to + + /// + /// svbool_t svwhilele_b8[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask8Bit(int left, int right) => CreateWhileLessThanOrEqualMask8Bit(left, right); + + /// + /// svbool_t svwhilele_b8[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask8Bit(long left, long right) => CreateWhileLessThanOrEqualMask8Bit(left, right); + + /// + /// svbool_t svwhilele_b8[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask8Bit(uint left, uint right) => CreateWhileLessThanOrEqualMask8Bit(left, right); + + /// + /// svbool_t svwhilele_b8[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask8Bit(ulong left, ulong right) => CreateWhileLessThanOrEqualMask8Bit(left, right); + + + /// Divide : Divide + + /// + /// svint32_t svdiv[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svdiv[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svdiv[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector Divide(Vector left, Vector right) => Divide(left, right); + + /// + /// svint64_t svdiv[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svdiv[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svdiv[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector Divide(Vector left, Vector right) => Divide(left, right); + + /// + /// svuint32_t svdiv[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svdiv[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svdiv[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector Divide(Vector left, Vector right) => Divide(left, right); + + /// + /// svuint64_t svdiv[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svdiv[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svdiv[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector Divide(Vector left, Vector right) => Divide(left, right); + + /// + /// svfloat32_t svdiv[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svdiv[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t 
svdiv[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector Divide(Vector left, Vector right) => Divide(left, right); + + /// + /// svfloat64_t svdiv[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svdiv[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svdiv[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector Divide(Vector left, Vector right) => Divide(left, right); + + + + /// DotProduct : Dot product + + /// + /// svint32_t svdot[_s32](svint32_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right) => DotProduct(addend, left, right); + + /// + /// svint64_t svdot[_s64](svint64_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right) => DotProduct(addend, left, right); + + /// + /// svuint32_t svdot[_u32](svuint32_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right) => DotProduct(addend, left, right); + + /// + /// svuint64_t svdot[_u64](svuint64_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right) => DotProduct(addend, left, right); + + + /// DotProductBySelectedScalar : Dot product + + /// + /// svint32_t svdot_lane[_s32](svint32_t op1, svint8_t op2, svint8_t op3, uint64_t imm_index) + /// + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => DotProductBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svint64_t svdot_lane[_s64](svint64_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => DotProductBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svuint32_t svdot_lane[_u32](svuint32_t op1, svuint8_t op2, svuint8_t op3, uint64_t imm_index) + /// + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => DotProductBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svuint64_t svdot_lane[_u64](svuint64_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => DotProductBySelectedScalar(addend, left, right, rightIndex); + + + /// DuplicateSelectedScalarToVector : Broadcast a scalar value + + /// + /// svint8_t svdup_lane[_s8](svint8_t data, uint8_t index) + /// svint8_t svdupq_lane[_s8](svint8_t data, uint64_t index) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svint16_t svdup_lane[_s16](svint16_t data, uint16_t index) + /// svint16_t svdupq_lane[_s16](svint16_t data, uint64_t index) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svint32_t svdup_lane[_s32](svint32_t data, uint32_t index) + /// svint32_t svdupq_lane[_s32](svint32_t data, uint64_t index) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, 
[ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svint64_t svdup_lane[_s64](svint64_t data, uint64_t index) + /// svint64_t svdupq_lane[_s64](svint64_t data, uint64_t index) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svuint8_t svdup_lane[_u8](svuint8_t data, uint8_t index) + /// svuint8_t svdupq_lane[_u8](svuint8_t data, uint64_t index) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svuint16_t svdup_lane[_u16](svuint16_t data, uint16_t index) + /// svuint16_t svdupq_lane[_u16](svuint16_t data, uint64_t index) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svuint32_t svdup_lane[_u32](svuint32_t data, uint32_t index) + /// svuint32_t svdupq_lane[_u32](svuint32_t data, uint64_t index) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svuint64_t svdup_lane[_u64](svuint64_t data, uint64_t index) + /// svuint64_t svdupq_lane[_u64](svuint64_t data, uint64_t index) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svfloat32_t svdup_lane[_f32](svfloat32_t data, uint32_t index) + /// svfloat32_t svdupq_lane[_f32](svfloat32_t data, uint64_t index) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svfloat64_t svdup_lane[_f64](svfloat64_t data, uint64_t index) + /// svfloat64_t svdupq_lane[_f64](svfloat64_t data, uint64_t index) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + + /// ExtractAfterLastScalar : Extract element after last + + /// + /// int8_t svlasta[_s8](svbool_t pg, svint8_t op) + /// + public static unsafe sbyte ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// int16_t svlasta[_s16](svbool_t pg, svint16_t op) + /// + public static unsafe short ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// int32_t svlasta[_s32](svbool_t pg, svint32_t op) + /// + public static unsafe int ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// int64_t svlasta[_s64](svbool_t pg, svint64_t op) + /// + public static unsafe long ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// uint8_t svlasta[_u8](svbool_t pg, svuint8_t op) + /// + public static unsafe byte ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// uint16_t svlasta[_u16](svbool_t pg, svuint16_t op) + /// + public static unsafe ushort ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// uint32_t svlasta[_u32](svbool_t pg, svuint32_t op) + /// + public static unsafe uint ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// uint64_t svlasta[_u64](svbool_t pg, svuint64_t op) + /// + public static unsafe ulong 
ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// float32_t svlasta[_f32](svbool_t pg, svfloat32_t op) + /// + public static unsafe float ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// float64_t svlasta[_f64](svbool_t pg, svfloat64_t op) + /// + public static unsafe double ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + + /// ExtractAfterLastVector : Extract element after last + + /// + /// int8_t svlasta[_s8](svbool_t pg, svint8_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// int16_t svlasta[_s16](svbool_t pg, svint16_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// int32_t svlasta[_s32](svbool_t pg, svint32_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// int64_t svlasta[_s64](svbool_t pg, svint64_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// uint8_t svlasta[_u8](svbool_t pg, svuint8_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// uint16_t svlasta[_u16](svbool_t pg, svuint16_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// uint32_t svlasta[_u32](svbool_t pg, svuint32_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// uint64_t svlasta[_u64](svbool_t pg, svuint64_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// float32_t svlasta[_f32](svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// float64_t svlasta[_f64](svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + + /// ExtractLastScalar : Extract last element + + /// + /// int8_t svlastb[_s8](svbool_t pg, svint8_t op) + /// + public static unsafe sbyte ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// int16_t svlastb[_s16](svbool_t pg, svint16_t op) + /// + public static unsafe short ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// int32_t svlastb[_s32](svbool_t pg, svint32_t op) + /// + public static unsafe int ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// int64_t svlastb[_s64](svbool_t pg, svint64_t op) + /// + public static unsafe long ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// uint8_t svlastb[_u8](svbool_t pg, svuint8_t op) + /// + public static unsafe byte ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// uint16_t svlastb[_u16](svbool_t pg, svuint16_t op) + /// + public static unsafe ushort ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// uint32_t svlastb[_u32](svbool_t pg, svuint32_t op) + /// + public static unsafe uint ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// uint64_t svlastb[_u64](svbool_t pg, svuint64_t op) + /// + public static unsafe ulong ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// float32_t 
svlastb[_f32](svbool_t pg, svfloat32_t op) + /// + public static unsafe float ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// float64_t svlastb[_f64](svbool_t pg, svfloat64_t op) + /// + public static unsafe double ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + + /// ExtractLastVector : Extract last element + + /// + /// int8_t svlastb[_s8](svbool_t pg, svint8_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// int16_t svlastb[_s16](svbool_t pg, svint16_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// int32_t svlastb[_s32](svbool_t pg, svint32_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// int64_t svlastb[_s64](svbool_t pg, svint64_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// uint8_t svlastb[_u8](svbool_t pg, svuint8_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// uint16_t svlastb[_u16](svbool_t pg, svuint16_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// uint32_t svlastb[_u32](svbool_t pg, svuint32_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// uint64_t svlastb[_u64](svbool_t pg, svuint64_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// float32_t svlastb[_f32](svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// float64_t svlastb[_f64](svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + + /// ExtractVector : Extract vector from pair of vectors + + /// + /// svint8_t svext[_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index); + + /// + /// svint16_t svext[_s16](svint16_t op1, svint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index); + + /// + /// svint32_t svext[_s32](svint32_t op1, svint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index); + + /// + /// svint64_t svext[_s64](svint64_t op1, svint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index); + + /// + /// svuint8_t svext[_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index); + + /// + /// svuint16_t svext[_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index); + + /// + /// svuint32_t svext[_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector 
lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index); + + /// + /// svuint64_t svext[_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index); + + /// + /// svfloat32_t svext[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index); + + /// + /// svfloat64_t svext[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index); + + + /// FloatingPointExponentialAccelerator : Floating-point exponential accelerator + + /// + /// svfloat32_t svexpa[_f32](svuint32_t op) + /// + public static unsafe Vector FloatingPointExponentialAccelerator(Vector value) => FloatingPointExponentialAccelerator(value); + + /// + /// svfloat64_t svexpa[_f64](svuint64_t op) + /// + public static unsafe Vector FloatingPointExponentialAccelerator(Vector value) => FloatingPointExponentialAccelerator(value); + + + /// FusedMultiplyAdd : Multiply-add, addend first + + /// + /// svfloat32_t svmla[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svmla[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svmla[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right) => FusedMultiplyAdd(addend, left, right); + + /// + /// svfloat64_t svmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t svmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t svmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right) => FusedMultiplyAdd(addend, left, right); + + + /// FusedMultiplyAddBySelectedScalar : Multiply-add, addend first + + /// + /// svfloat32_t svmla_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index) + /// + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svfloat64_t svmla_lane[_f64](svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_index) + /// + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + + /// FusedMultiplyAddNegated : Negated multiply-add, addend first + + /// + /// svfloat32_t svnmla[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svnmla[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svnmla[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right) => FusedMultiplyAddNegated(addend, left, right); + + /// + /// svfloat64_t svnmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t 
svnmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t svnmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right) => FusedMultiplyAddNegated(addend, left, right); + + + /// FusedMultiplySubtract : Multiply-subtract, minuend first + + /// + /// svfloat32_t svmls[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svmls[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svmls[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right) => FusedMultiplySubtract(minuend, left, right); + + /// + /// svfloat64_t svmls[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t svmls[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t svmls[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right) => FusedMultiplySubtract(minuend, left, right); + + + /// FusedMultiplySubtractBySelectedScalar : Multiply-subtract, minuend first + + /// + /// svfloat32_t svmls_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index) + /// + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); + + /// + /// svfloat64_t svmls_lane[_f64](svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_index) + /// + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); + + + /// FusedMultiplySubtractNegated : Negated multiply-subtract, minuend first + + /// + /// svfloat32_t svnmls[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svnmls[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// svfloat32_t svnmls[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right) => FusedMultiplySubtractNegated(minuend, left, right); + + /// + /// svfloat64_t svnmls[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t svnmls[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// svfloat64_t svnmls[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right) => FusedMultiplySubtractNegated(minuend, left, right); + + + /// GatherPrefetch16Bit : Prefetch halfwords + + /// + /// void svprfh_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + /// + /// void svprfh_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// + 
public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + /// + /// void svprfh_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, addresses, prefetchType); + + /// + /// void svprfh_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + /// + /// void svprfh_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, addresses, prefetchType); + + /// + /// void svprfh_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + /// + /// void svprfh_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + /// + /// void svprfh_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + /// + /// void svprfh_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, addresses, prefetchType); + + /// + /// void svprfh_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + /// + /// void svprfh_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, addresses, prefetchType); + + /// + /// void svprfh_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + + /// GatherPrefetch32Bit : Prefetch words + + /// + /// void svprfw_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => 
GatherPrefetch32Bit(mask, address, indices, prefetchType); + + /// + /// void svprfw_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, address, indices, prefetchType); + + /// + /// void svprfw_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, addresses, prefetchType); + + /// + /// void svprfw_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, address, indices, prefetchType); + + /// + /// void svprfw_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, addresses, prefetchType); + + /// + /// void svprfw_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, address, indices, prefetchType); + + /// + /// void svprfw_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, address, indices, prefetchType); + + /// + /// void svprfw_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, address, indices, prefetchType); + + /// + /// void svprfw_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, addresses, prefetchType); + + /// + /// void svprfw_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, address, indices, prefetchType); + + /// + /// void svprfw_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, addresses, prefetchType); + + /// + /// void svprfw_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, address, indices, prefetchType); + + + /// GatherPrefetch64Bit : Prefetch doublewords + + /// + /// void svprfd_gather_[s32]index(svbool_t pg, const void *base, 
svint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, address, indices, prefetchType); + + /// + /// void svprfd_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, address, indices, prefetchType); + + /// + /// void svprfd_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, addresses, prefetchType); + + /// + /// void svprfd_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, address, indices, prefetchType); + + /// + /// void svprfd_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, addresses, prefetchType); + + /// + /// void svprfd_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, address, indices, prefetchType); + + /// + /// void svprfd_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, address, indices, prefetchType); + + /// + /// void svprfd_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, address, indices, prefetchType); + + /// + /// void svprfd_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, addresses, prefetchType); + + /// + /// void svprfd_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, address, indices, prefetchType); + + /// + /// void svprfd_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, addresses, prefetchType); + + /// + /// void svprfd_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => 
GatherPrefetch64Bit(mask, address, indices, prefetchType); + + + /// GatherPrefetch8Bit : Prefetch bytes + + /// + /// void svprfb_gather_[s32]offset(svbool_t pg, const void *base, svint32_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType); + + /// + /// void svprfb_gather_[s64]offset(svbool_t pg, const void *base, svint64_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType); + + /// + /// void svprfb_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, addresses, prefetchType); + + /// + /// void svprfb_gather_[u32]offset(svbool_t pg, const void *base, svuint32_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType); + + /// + /// void svprfb_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, addresses, prefetchType); + + /// + /// void svprfb_gather_[u64]offset(svbool_t pg, const void *base, svuint64_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType); + + /// + /// void svprfb_gather_[s32]offset(svbool_t pg, const void *base, svint32_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType); + + /// + /// void svprfb_gather_[s64]offset(svbool_t pg, const void *base, svint64_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType); + + /// + /// void svprfb_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, addresses, prefetchType); + + /// + /// void svprfb_gather_[u32]offset(svbool_t pg, const void *base, svuint32_t offsets, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType); + + /// + /// void svprfb_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, addresses, prefetchType); + + /// + /// void svprfb_gather_[u64]offset(svbool_t pg, const void *base, svuint64_t offsets, enum 
svprfop op) + /// + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType); + + + /// GatherVector : Unextended load + + /// + /// svint32_t svld1_gather_[s32]index[_s32](svbool_t pg, const int32_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVector(Vector mask, int* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svint32_t svld1_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVector(Vector mask, Vector addresses) => GatherVector(mask, addresses); + + /// + /// svint32_t svld1_gather_[u32]index[_s32](svbool_t pg, const int32_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVector(Vector mask, int* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svint64_t svld1_gather_[s64]index[_s64](svbool_t pg, const int64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVector(Vector mask, long* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svint64_t svld1_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVector(Vector mask, Vector addresses) => GatherVector(mask, addresses); + + /// + /// svint64_t svld1_gather_[u64]index[_s64](svbool_t pg, const int64_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVector(Vector mask, long* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svuint32_t svld1_gather_[s32]index[_u32](svbool_t pg, const uint32_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVector(Vector mask, uint* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svuint32_t svld1_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVector(Vector mask, Vector addresses) => GatherVector(mask, addresses); + + /// + /// svuint32_t svld1_gather_[u32]index[_u32](svbool_t pg, const uint32_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVector(Vector mask, uint* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svuint64_t svld1_gather_[s64]index[_u64](svbool_t pg, const uint64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVector(Vector mask, ulong* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svuint64_t svld1_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVector(Vector mask, Vector addresses) => GatherVector(mask, addresses); + + /// + /// svuint64_t svld1_gather_[u64]index[_u64](svbool_t pg, const uint64_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVector(Vector mask, ulong* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svfloat32_t svld1_gather_[s32]index[_f32](svbool_t pg, const float32_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVector(Vector mask, float* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svfloat32_t svld1_gather[_u32base]_f32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVector(Vector mask, Vector addresses) => GatherVector(mask, addresses); + + /// + /// svfloat32_t svld1_gather_[u32]index[_f32](svbool_t pg, const float32_t *base, svuint32_t indices) + /// + 
public static unsafe Vector GatherVector(Vector mask, float* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svfloat64_t svld1_gather_[s64]index[_f64](svbool_t pg, const float64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVector(Vector mask, double* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svfloat64_t svld1_gather[_u64base]_f64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVector(Vector mask, Vector addresses) => GatherVector(mask, addresses); + + /// + /// svfloat64_t svld1_gather_[u64]index[_f64](svbool_t pg, const float64_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVector(Vector mask, double* address, Vector indices) => GatherVector(mask, address, indices); + + + /// GatherVectorByteZeroExtend : Load 8-bit data and zero-extend + + /// + /// svint32_t svld1ub_gather_[s32]offset_s32(svbool_t pg, const uint8_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices) => GatherVectorByteZeroExtend(mask, address, indices); + + /// + /// svint32_t svld1ub_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector addresses) => GatherVectorByteZeroExtend(mask, addresses); + + /// + /// svint32_t svld1ub_gather_[u32]offset_s32(svbool_t pg, const uint8_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices) => GatherVectorByteZeroExtend(mask, address, indices); + + /// + /// svint64_t svld1ub_gather_[s64]offset_s64(svbool_t pg, const uint8_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices) => GatherVectorByteZeroExtend(mask, address, indices); + + /// + /// svint64_t svld1ub_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector addresses) => GatherVectorByteZeroExtend(mask, addresses); + + /// + /// svint64_t svld1ub_gather_[u64]offset_s64(svbool_t pg, const uint8_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices) => GatherVectorByteZeroExtend(mask, address, indices); + + /// + /// svuint32_t svld1ub_gather_[s32]offset_u32(svbool_t pg, const uint8_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices) => GatherVectorByteZeroExtend(mask, address, indices); + + /// + /// svuint32_t svld1ub_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector addresses) => GatherVectorByteZeroExtend(mask, addresses); + + /// + /// svuint32_t svld1ub_gather_[u32]offset_u32(svbool_t pg, const uint8_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices) => GatherVectorByteZeroExtend(mask, address, indices); + + /// + /// svuint64_t svld1ub_gather_[s64]offset_u64(svbool_t pg, const uint8_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices) => GatherVectorByteZeroExtend(mask, address, indices); + + /// + /// svuint64_t svld1ub_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + 
/// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector addresses) => GatherVectorByteZeroExtend(mask, addresses); + + /// + /// svuint64_t svld1ub_gather_[u64]offset_u64(svbool_t pg, const uint8_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices) => GatherVectorByteZeroExtend(mask, address, indices); + + + /// GatherVectorByteZeroExtendFirstFaulting : Load 8-bit data and zero-extend, first-faulting + + /// + /// svint32_t svldff1ub_gather_[s32]offset_s32(svbool_t pg, const uint8_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svint32_t svldff1ub_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorByteZeroExtendFirstFaulting(mask, addresses); + + /// + /// svint32_t svldff1ub_gather_[u32]offset_s32(svbool_t pg, const uint8_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1ub_gather_[s64]offset_s64(svbool_t pg, const uint8_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1ub_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorByteZeroExtendFirstFaulting(mask, addresses); + + /// + /// svint64_t svldff1ub_gather_[u64]offset_s64(svbool_t pg, const uint8_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint32_t svldff1ub_gather_[s32]offset_u32(svbool_t pg, const uint8_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint32_t svldff1ub_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorByteZeroExtendFirstFaulting(mask, addresses); + + /// + /// svuint32_t svldff1ub_gather_[u32]offset_u32(svbool_t pg, const uint8_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1ub_gather_[s64]offset_u64(svbool_t pg, const uint8_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1ub_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector 
addresses) => GatherVectorByteZeroExtendFirstFaulting(mask, addresses); + + /// + /// svuint64_t svldff1ub_gather_[u64]offset_u64(svbool_t pg, const uint8_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets); + + + /// GatherVectorFirstFaulting : Unextended load, first-faulting + + /// + /// svint32_t svldff1_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses) => GatherVectorFirstFaulting(mask, addresses); + + /// + /// svint32_t svldff1_gather_[s32]index[_s32](svbool_t pg, const int32_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorFirstFaulting(mask, address, indices); + + /// + /// svint32_t svldff1_gather_[u32]index[_s32](svbool_t pg, const int32_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses) => GatherVectorFirstFaulting(mask, addresses); + + /// + /// svint64_t svldff1_gather_[s64]index[_s64](svbool_t pg, const int64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, long* address, Vector indices) => GatherVectorFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1_gather_[u64]index[_s64](svbool_t pg, const int64_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, long* address, Vector indices) => GatherVectorFirstFaulting(mask, address, indices); + + /// + /// svuint32_t svldff1_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses) => GatherVectorFirstFaulting(mask, addresses); + + /// + /// svuint32_t svldff1_gather_[s32]index[_u32](svbool_t pg, const uint32_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorFirstFaulting(mask, address, indices); + + /// + /// svuint32_t svldff1_gather_[u32]index[_u32](svbool_t pg, const uint32_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses) => GatherVectorFirstFaulting(mask, addresses); + + /// + /// svuint64_t svldff1_gather_[s64]index[_u64](svbool_t pg, const uint64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, ulong* address, Vector indices) => GatherVectorFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1_gather_[u64]index[_u64](svbool_t pg, const uint64_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, ulong* address, Vector indices) => GatherVectorFirstFaulting(mask, address, indices); + + /// + /// svfloat32_t 
svldff1_gather_[s32]index[_f32](svbool_t pg, const float32_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, float* address, Vector indices) => GatherVectorFirstFaulting(mask, address, indices); + + /// + /// svfloat32_t svldff1_gather[_u32base]_f32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses) => GatherVectorFirstFaulting(mask, addresses); + + /// + /// svfloat32_t svldff1_gather_[u32]index[_f32](svbool_t pg, const float32_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, float* address, Vector indices) => GatherVectorFirstFaulting(mask, address, indices); + + /// + /// svfloat64_t svldff1_gather_[s64]index[_f64](svbool_t pg, const float64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, double* address, Vector indices) => GatherVectorFirstFaulting(mask, address, indices); + + /// + /// svfloat64_t svldff1_gather[_u64base]_f64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses) => GatherVectorFirstFaulting(mask, addresses); + + /// + /// svfloat64_t svldff1_gather_[u64]index[_f64](svbool_t pg, const float64_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, double* address, Vector indices) => GatherVectorFirstFaulting(mask, address, indices); + + + /// GatherVectorInt16SignExtend : Load 16-bit data and sign-extend + + /// + /// svint32_t svld1sh_gather_[s32]index_s32(svbool_t pg, const int16_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtend(mask, address, indices); + + /// + /// svint32_t svld1sh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses) => GatherVectorInt16SignExtend(mask, addresses); + + /// + /// svint32_t svld1sh_gather_[u32]index_s32(svbool_t pg, const int16_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtend(mask, address, indices); + + /// + /// svint64_t svld1sh_gather_[s64]index_s64(svbool_t pg, const int16_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtend(mask, address, indices); + + /// + /// svint64_t svld1sh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses) => GatherVectorInt16SignExtend(mask, addresses); + + /// + /// svint64_t svld1sh_gather_[u64]index_s64(svbool_t pg, const int16_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtend(mask, address, indices); + + /// + /// svuint32_t svld1sh_gather_[s32]index_u32(svbool_t pg, const int16_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtend(mask, address, indices); + + /// + /// svuint32_t svld1sh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector 
GatherVectorInt16SignExtend(Vector mask, Vector addresses) => GatherVectorInt16SignExtend(mask, addresses); + + /// + /// svuint32_t svld1sh_gather_[u32]index_u32(svbool_t pg, const int16_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtend(mask, address, indices); + + /// + /// svuint64_t svld1sh_gather_[s64]index_u64(svbool_t pg, const int16_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtend(mask, address, indices); + + /// + /// svuint64_t svld1sh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses) => GatherVectorInt16SignExtend(mask, addresses); + + /// + /// svuint64_t svld1sh_gather_[u64]index_u64(svbool_t pg, const int16_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtend(mask, address, indices); + + + /// GatherVectorInt16SignExtendFirstFaulting : Load 16-bit data and sign-extend, first-faulting + + /// + /// svint32_t svldff1sh_gather_[s32]index_s32(svbool_t pg, const int16_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices); + + /// + /// svint32_t svldff1sh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorInt16SignExtendFirstFaulting(mask, addresses); + + /// + /// svint32_t svldff1sh_gather_[u32]index_s32(svbool_t pg, const int16_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1sh_gather_[s64]index_s64(svbool_t pg, const int16_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1sh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorInt16SignExtendFirstFaulting(mask, addresses); + + /// + /// svint64_t svldff1sh_gather_[u64]index_s64(svbool_t pg, const int16_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices); + + /// + /// svuint32_t svldff1sh_gather_[s32]index_u32(svbool_t pg, const int16_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices); + + /// + /// svuint32_t svldff1sh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorInt16SignExtendFirstFaulting(mask, addresses); + + /// + /// 
svuint32_t svldff1sh_gather_[u32]index_u32(svbool_t pg, const int16_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1sh_gather_[s64]index_u64(svbool_t pg, const int16_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1sh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorInt16SignExtendFirstFaulting(mask, addresses); + + /// + /// svuint64_t svldff1sh_gather_[u64]index_u64(svbool_t pg, const int16_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices); + + + /// GatherVectorInt16WithByteOffsetsSignExtend : Load 16-bit data and sign-extend + + /// + /// svint32_t svld1sh_gather_[s32]offset_s32(svbool_t pg, const int16_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svint32_t svld1sh_gather_[u32]offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svint64_t svld1sh_gather_[s64]offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svint64_t svld1sh_gather_[u64]offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svuint32_t svld1sh_gather_[s32]offset_u32(svbool_t pg, const int16_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svuint32_t svld1sh_gather_[u32]offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svuint64_t svld1sh_gather_[s64]offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svuint64_t svld1sh_gather_[u64]offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets) + /// + public static unsafe Vector 
GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets); + + + /// GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting : Load 16-bit data and sign-extend, first-faulting + + /// + /// svint32_t svldff1sh_gather_[s32]offset_s32(svbool_t pg, const int16_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svint32_t svldff1sh_gather_[u32]offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1sh_gather_[s64]offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1sh_gather_[u64]offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint32_t svldff1sh_gather_[s32]offset_u32(svbool_t pg, const int16_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint32_t svldff1sh_gather_[u32]offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1sh_gather_[s64]offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1sh_gather_[u64]offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector mask, short* address, Vector offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + + /// GatherVectorInt32SignExtend : Load 32-bit data and sign-extend + + /// + /// svint64_t svld1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtend(mask, address, indices); + + /// + /// svint64_t svld1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses) => GatherVectorInt32SignExtend(mask, addresses); + + /// + /// svint64_t 
svld1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtend(mask, address, indices); + + /// + /// svint64_t svld1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtend(mask, address, indices); + + /// + /// svint64_t svld1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses) => GatherVectorInt32SignExtend(mask, addresses); + + /// + /// svint64_t svld1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtend(mask, address, indices); + + /// + /// svuint64_t svld1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtend(mask, address, indices); + + /// + /// svuint64_t svld1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses) => GatherVectorInt32SignExtend(mask, addresses); + + /// + /// svuint64_t svld1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtend(mask, address, indices); + + /// + /// svuint64_t svld1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtend(mask, address, indices); + + /// + /// svuint64_t svld1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses) => GatherVectorInt32SignExtend(mask, addresses); + + /// + /// svuint64_t svld1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtend(mask, address, indices); + + + /// GatherVectorInt32SignExtendFirstFaulting : Load 32-bit data and sign-extend, first-faulting + + /// + /// svint64_t svldff1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorInt32SignExtendFirstFaulting(mask, addresses); + + /// + /// svint64_t svldff1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices); + + /// + /// svint64_t 
svldff1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorInt32SignExtendFirstFaulting(mask, addresses); + + /// + /// svint64_t svldff1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorInt32SignExtendFirstFaulting(mask, addresses); + + /// + /// svuint64_t svldff1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorInt32SignExtendFirstFaulting(mask, addresses); + + /// + /// svuint64_t svldff1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices); + + + /// GatherVectorInt32WithByteOffsetsSignExtend : Load 32-bit data and sign-extend + + /// + /// svint64_t svld1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svint64_t svld1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svint64_t svld1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// 
svint64_t svld1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svuint64_t svld1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svuint64_t svld1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svuint64_t svld1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svuint64_t svld1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + + /// GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting : Load 32-bit data and sign-extend, first-faulting + + /// + /// svint64_t svldff1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => 
GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets);
+
+ ///
+ /// svuint64_t svldff1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets);
+
+ ///
+ /// svuint64_t svldff1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets);
+
+
+ /// GatherVectorSByteSignExtend : Load 8-bit data and sign-extend
+
+ ///
+ /// svint32_t svld1sb_gather_[s32]offset_s32(svbool_t pg, const int8_t *base, svint32_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtend(mask, address, offsets);
+
+ ///
+ /// svint32_t svld1sb_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector addresses) => GatherVectorSByteSignExtend(mask, addresses);
+
+ ///
+ /// svint32_t svld1sb_gather_[u32]offset_s32(svbool_t pg, const int8_t *base, svuint32_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtend(mask, address, offsets);
+
+ ///
+ /// svint64_t svld1sb_gather_[s64]offset_s64(svbool_t pg, const int8_t *base, svint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtend(mask, address, offsets);
+
+ ///
+ /// svint64_t svld1sb_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector addresses) => GatherVectorSByteSignExtend(mask, addresses);
+
+ ///
+ /// svint64_t svld1sb_gather_[u64]offset_s64(svbool_t pg, const int8_t *base, svuint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtend(mask, address, offsets);
+
+ ///
+ /// svuint32_t svld1sb_gather_[s32]offset_u32(svbool_t pg, const int8_t *base, svint32_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtend(mask, address, offsets);
+
+ ///
+ /// svuint32_t svld1sb_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector addresses) => GatherVectorSByteSignExtend(mask, addresses);
+
+ ///
+ /// svuint32_t svld1sb_gather_[u32]offset_u32(svbool_t pg, const int8_t *base, svuint32_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtend(mask, address, offsets);
+
+ ///
+ /// svuint64_t svld1sb_gather_[s64]offset_u64(svbool_t pg, const int8_t *base, svint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtend(mask, address, offsets);
+
+ ///
+ /// svuint64_t svld1sb_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector addresses) => GatherVectorSByteSignExtend(mask, addresses);
+
+ ///
+ /// svuint64_t svld1sb_gather_[u64]offset_u64(svbool_t pg, const int8_t *base, svuint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtend(mask, address, offsets);
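+
+ // --- Illustrative usage (editor's sketch, not part of the generated surface) ---
+ // A hedged example of the sign-extending byte gather above. `GatherSBytes`,
+ // `data`, and `byteOffsets` are hypothetical names, and CreateTrueMaskInt32 is
+ // assumed from the wider Sve surface. Each active lane i loads the sbyte at
+ // ((byte*)data)[byteOffsets[i]] and sign-extends it into a 32-bit lane.
+ // public static unsafe Vector<int> GatherSBytes(sbyte* data, Vector<int> byteOffsets)
+ // {
+ //     Vector<int> mask = Sve.CreateTrueMaskInt32();
+ //     return Sve.GatherVectorSByteSignExtend(mask, data, byteOffsets);
+ // }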
+
+
+ /// GatherVectorSByteSignExtendFirstFaulting : Load 8-bit data and sign-extend, first-faulting
+
+ ///
+ /// svint32_t svldff1sb_gather_[s32]offset_s32(svbool_t pg, const int8_t *base, svint32_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets);
+
+ ///
+ /// svint32_t svldff1sb_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorSByteSignExtendFirstFaulting(mask, addresses);
+
+ ///
+ /// svint32_t svldff1sb_gather_[u32]offset_s32(svbool_t pg, const int8_t *base, svuint32_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets);
+
+ ///
+ /// svint64_t svldff1sb_gather_[s64]offset_s64(svbool_t pg, const int8_t *base, svint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets);
+
+ ///
+ /// svint64_t svldff1sb_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorSByteSignExtendFirstFaulting(mask, addresses);
+
+ ///
+ /// svint64_t svldff1sb_gather_[u64]offset_s64(svbool_t pg, const int8_t *base, svuint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets);
+
+ ///
+ /// svuint32_t svldff1sb_gather_[s32]offset_u32(svbool_t pg, const int8_t *base, svint32_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets);
+
+ ///
+ /// svuint32_t svldff1sb_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorSByteSignExtendFirstFaulting(mask, addresses);
+
+ ///
+ /// svuint32_t svldff1sb_gather_[u32]offset_u32(svbool_t pg, const int8_t *base, svuint32_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets);
+
+ ///
+ /// svuint64_t svldff1sb_gather_[s64]offset_u64(svbool_t pg, const int8_t *base, svint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets);
+
+ ///
+ /// svuint64_t svldff1sb_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ ///
+ public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses)
=> GatherVectorSByteSignExtendFirstFaulting(mask, addresses); + + /// + /// svuint64_t svldff1sb_gather_[u64]offset_u64(svbool_t pg, const int8_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets); + + + /// GatherVectorUInt16WithByteOffsetsZeroExtend : Load 16-bit data and zero-extend + + /// + /// svint32_t svld1uh_gather_[s32]offset_s32(svbool_t pg, const uint16_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svint32_t svld1uh_gather_[u32]offset_s32(svbool_t pg, const uint16_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svint64_t svld1uh_gather_[s64]offset_s64(svbool_t pg, const uint16_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svint64_t svld1uh_gather_[u64]offset_s64(svbool_t pg, const uint16_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svuint32_t svld1uh_gather_[s32]offset_u32(svbool_t pg, const uint16_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svuint32_t svld1uh_gather_[u32]offset_u32(svbool_t pg, const uint16_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svuint64_t svld1uh_gather_[s64]offset_u64(svbool_t pg, const uint16_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svuint64_t svld1uh_gather_[u64]offset_u64(svbool_t pg, const uint16_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets); + + + /// GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting : Load 16-bit data and zero-extend, first-faulting + + /// + /// svint32_t svldff1uh_gather_[s32]offset_s32(svbool_t pg, const uint16_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svint32_t svldff1uh_gather_[u32]offset_s32(svbool_t pg, const uint16_t *base, svuint32_t offsets) + /// + public static unsafe Vector 
GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1uh_gather_[s64]offset_s64(svbool_t pg, const uint16_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1uh_gather_[u64]offset_s64(svbool_t pg, const uint16_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint32_t svldff1uh_gather_[s32]offset_u32(svbool_t pg, const uint16_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint32_t svldff1uh_gather_[u32]offset_u32(svbool_t pg, const uint16_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1uh_gather_[s64]offset_u64(svbool_t pg, const uint16_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1uh_gather_[u64]offset_u64(svbool_t pg, const uint16_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + + /// GatherVectorUInt16ZeroExtend : Load 16-bit data and zero-extend + + /// + /// svint32_t svld1uh_gather_[s32]index_s32(svbool_t pg, const uint16_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtend(mask, address, indices); + + /// + /// svint32_t svld1uh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses) => GatherVectorUInt16ZeroExtend(mask, addresses); + + /// + /// svint32_t svld1uh_gather_[u32]index_s32(svbool_t pg, const uint16_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtend(mask, address, indices); + + /// + /// svint64_t svld1uh_gather_[s64]index_s64(svbool_t pg, const uint16_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtend(mask, address, indices); + + /// + /// svint64_t svld1uh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses) => 
GatherVectorUInt16ZeroExtend(mask, addresses); + + /// + /// svint64_t svld1uh_gather_[u64]index_s64(svbool_t pg, const uint16_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtend(mask, address, indices); + + /// + /// svuint32_t svld1uh_gather_[s32]index_u32(svbool_t pg, const uint16_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtend(mask, address, indices); + + /// + /// svuint32_t svld1uh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses) => GatherVectorUInt16ZeroExtend(mask, addresses); + + /// + /// svuint32_t svld1uh_gather_[u32]index_u32(svbool_t pg, const uint16_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtend(mask, address, indices); + + /// + /// svuint64_t svld1uh_gather_[s64]index_u64(svbool_t pg, const uint16_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtend(mask, address, indices); + + /// + /// svuint64_t svld1uh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses) => GatherVectorUInt16ZeroExtend(mask, addresses); + + /// + /// svuint64_t svld1uh_gather_[u64]index_u64(svbool_t pg, const uint16_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtend(mask, address, indices); + + + /// GatherVectorUInt16ZeroExtendFirstFaulting : Load 16-bit data and zero-extend, first-faulting + + /// + /// svint32_t svldff1uh_gather_[s32]index_s32(svbool_t pg, const uint16_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices); + + /// + /// svint32_t svldff1uh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, addresses); + + /// + /// svint32_t svldff1uh_gather_[u32]index_s32(svbool_t pg, const uint16_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1uh_gather_[s64]index_s64(svbool_t pg, const uint16_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1uh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, addresses); + + /// + /// svint64_t svldff1uh_gather_[u64]index_s64(svbool_t pg, const uint16_t *base, svuint64_t indices) + /// + public 
static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices); + + /// + /// svuint32_t svldff1uh_gather_[s32]index_u32(svbool_t pg, const uint16_t *base, svint32_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices); + + /// + /// svuint32_t svldff1uh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, addresses); + + /// + /// svuint32_t svldff1uh_gather_[u32]index_u32(svbool_t pg, const uint16_t *base, svuint32_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1uh_gather_[s64]index_u64(svbool_t pg, const uint16_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1uh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, addresses); + + /// + /// svuint64_t svldff1uh_gather_[u64]index_u64(svbool_t pg, const uint16_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices); + + + /// GatherVectorUInt32WithByteOffsetsZeroExtend : Load 32-bit data and zero-extend + + /// + /// svint64_t svld1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svint64_t svld1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svint64_t svld1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svint64_t svld1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svuint64_t svld1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// 
svuint64_t svld1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svuint64_t svld1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svuint64_t svld1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + + /// GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting : Load 32-bit data and zero-extend, first-faulting + + /// + /// svint64_t svldff1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector 
GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + + /// GatherVectorUInt32ZeroExtend : Load 32-bit data and zero-extend + + /// + /// svint64_t svld1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtend(mask, address, indices); + + /// + /// svint64_t svld1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtend(mask, addresses); + + /// + /// svint64_t svld1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtend(mask, address, indices); + + /// + /// svint64_t svld1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtend(mask, address, indices); + + /// + /// svint64_t svld1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtend(mask, addresses); + + /// + /// svint64_t svld1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtend(mask, address, indices); + + /// + /// svuint64_t svld1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtend(mask, address, indices); + + /// + /// svuint64_t svld1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtend(mask, addresses); + + /// + /// svuint64_t svld1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtend(mask, address, indices); + + /// + /// svuint64_t svld1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtend(mask, address, indices); + + /// + /// svuint64_t svld1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtend(mask, addresses); + + /// + /// svuint64_t svld1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtend(mask, address, indices); + + + /// GatherVectorUInt32ZeroExtendFirstFaulting : Load 32-bit data and zero-extend, first-faulting + + /// + /// svint64_t svldff1uw_gather_[s64]index_s64(svbool_t pg, 
const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, addresses); + + /// + /// svint64_t svldff1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, addresses); + + /// + /// svint64_t svldff1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, addresses); + + /// + /// svuint64_t svldff1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, addresses); + + /// + /// svuint64_t svldff1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices); + + + /// GatherVectorWithByteOffsetFirstFaulting : Unextended load, first-faulting + + /// + /// svint32_t svldff1_gather_[s32]offset[_s32](svbool_t pg, const 
int32_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svint32_t svldff1_gather_[u32]offset[_s32](svbool_t pg, const int32_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1_gather_[s64]offset[_s64](svbool_t pg, const int64_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, long* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1_gather_[u64]offset[_s64](svbool_t pg, const int64_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, long* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svuint32_t svldff1_gather_[s32]offset[_u32](svbool_t pg, const uint32_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svuint32_t svldff1_gather_[u32]offset[_u32](svbool_t pg, const uint32_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1_gather_[s64]offset[_u64](svbool_t pg, const uint64_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, ulong* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1_gather_[u64]offset[_u64](svbool_t pg, const uint64_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, ulong* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svfloat32_t svldff1_gather_[s32]offset[_f32](svbool_t pg, const float32_t *base, svint32_t offsets) + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, float* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svfloat32_t svldff1_gather_[u32]offset[_f32](svbool_t pg, const float32_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, float* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svfloat64_t svldff1_gather_[s64]offset[_f64](svbool_t pg, const float64_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, double* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svfloat64_t svldff1_gather_[u64]offset[_f64](svbool_t pg, const float64_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, double* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + + /// 
GatherVectorWithByteOffsets : Unextended load
+
+ ///
+ /// svint32_t svld1_gather_[s32]offset[_s32](svbool_t pg, const int32_t *base, svint32_t offsets)
+ ///
+ public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, int* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets);
+
+ ///
+ /// svint32_t svld1_gather_[u32]offset[_s32](svbool_t pg, const int32_t *base, svuint32_t offsets)
+ ///
+ public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, int* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets);
+
+ ///
+ /// svint64_t svld1_gather_[s64]offset[_s64](svbool_t pg, const int64_t *base, svint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, long* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets);
+
+ ///
+ /// svint64_t svld1_gather_[u64]offset[_s64](svbool_t pg, const int64_t *base, svuint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, long* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets);
+
+ ///
+ /// svuint32_t svld1_gather_[s32]offset[_u32](svbool_t pg, const uint32_t *base, svint32_t offsets)
+ ///
+ public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, uint* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets);
+
+ ///
+ /// svuint32_t svld1_gather_[u32]offset[_u32](svbool_t pg, const uint32_t *base, svuint32_t offsets)
+ ///
+ public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, uint* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets);
+
+ ///
+ /// svuint64_t svld1_gather_[s64]offset[_u64](svbool_t pg, const uint64_t *base, svint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, ulong* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets);
+
+ ///
+ /// svuint64_t svld1_gather_[u64]offset[_u64](svbool_t pg, const uint64_t *base, svuint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, ulong* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets);
+
+ ///
+ /// svfloat32_t svld1_gather_[s32]offset[_f32](svbool_t pg, const float32_t *base, svint32_t offsets)
+ ///
+ public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, float* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets);
+
+ ///
+ /// svfloat32_t svld1_gather_[u32]offset[_f32](svbool_t pg, const float32_t *base, svuint32_t offsets)
+ ///
+ public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, float* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets);
+
+ ///
+ /// svfloat64_t svld1_gather_[s64]offset[_f64](svbool_t pg, const float64_t *base, svint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, double* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets);
+
+ ///
+ /// svfloat64_t svld1_gather_[u64]offset[_f64](svbool_t pg, const float64_t *base, svuint64_t offsets)
+ ///
+ public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, double* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets);
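+
+ // --- Illustrative usage (editor's sketch, not part of the generated surface) ---
+ // A hedged sketch of the unextended byte-offset gather above. `GatherInts`,
+ // `data`, and `byteOffsets` are hypothetical names; CreateTrueMaskInt32 is
+ // assumed from the wider Sve surface. Lane i loads the int stored at
+ // (byte*)data + byteOffsets[i], so the offsets need not be multiples of four.
+ // public static unsafe Vector<int> GatherInts(int* data, Vector<int> byteOffsets)
+ // {
+ //     Vector<int> mask = Sve.CreateTrueMaskInt32();
+ //     return Sve.GatherVectorWithByteOffsets(mask, data, byteOffsets);
+ // }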
+
+
+    /// GetActiveElementCount : Count set predicate bits
+
+    /// <summary>
+    /// uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<sbyte> mask, Vector<sbyte> from) => GetActiveElementCount(mask, from);
+
+    /// <summary>
+    /// uint64_t svcntp_b16(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<short> mask, Vector<short> from) => GetActiveElementCount(mask, from);
+
+    /// <summary>
+    /// uint64_t svcntp_b32(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<int> mask, Vector<int> from) => GetActiveElementCount(mask, from);
+
+    /// <summary>
+    /// uint64_t svcntp_b64(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<long> mask, Vector<long> from) => GetActiveElementCount(mask, from);
+
+    /// <summary>
+    /// uint64_t svcntp_b8(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<byte> mask, Vector<byte> from) => GetActiveElementCount(mask, from);
+
+    /// <summary>
+    /// uint64_t svcntp_b16(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<ushort> mask, Vector<ushort> from) => GetActiveElementCount(mask, from);
+
+    /// <summary>
+    /// uint64_t svcntp_b32(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<uint> mask, Vector<uint> from) => GetActiveElementCount(mask, from);
+
+    /// <summary>
+    /// uint64_t svcntp_b64(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<ulong> mask, Vector<ulong> from) => GetActiveElementCount(mask, from);
+
+    /// <summary>
+    /// uint64_t svcntp_b32(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<float> mask, Vector<float> from) => GetActiveElementCount(mask, from);
+
+    /// <summary>
+    /// uint64_t svcntp_b64(svbool_t pg, svbool_t op)
+    /// </summary>
+    public static unsafe ulong GetActiveElementCount(Vector<double> mask, Vector<double> from) => GetActiveElementCount(mask, from);
+
+
+    /// GetFfr : Read FFR, returning predicate of successfully loaded elements
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<sbyte> GetFfr() => GetFfr();
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<short> GetFfr() => GetFfr();
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<int> GetFfr() => GetFfr();
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<long> GetFfr() => GetFfr();
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<byte> GetFfr() => GetFfr();
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<ushort> GetFfr() => GetFfr();
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<uint> GetFfr() => GetFfr();
+
+    /// <summary>
+    /// svbool_t svrdffr()
+    /// svbool_t svrdffr_z(svbool_t pg)
+    /// </summary>
+    public static unsafe Vector<ulong> GetFfr() => GetFfr();
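GetFfr pairs with the first-faulting loads that appear later in this listing: after such a load, the FFR records which lanes loaded before the first fault, and an svcntp-style count turns that predicate into a scalar. A sketch under those assumptions, written in the listing's own shape (here GetFfr is overloaded purely by return type, and the FFR would typically be reset to all-true beforehand with a SetFfr-style helper that this excerpt does not show):

    // Count how many int lanes loaded successfully before the first fault.
    static unsafe ulong CountLoadedLanes(Vector<int> mask, int* source)
    {
        Vector<int> data = Sve.LoadVectorFirstFaulting(mask, source); // see LoadVectorFirstFaulting below
        Vector<int> ffr  = Sve.GetFfr();                              // lanes that actually loaded
        return Sve.GetActiveElementCount(ffr, ffr);
    }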
+
+
+    /// InsertIntoShiftedVector : Insert scalar into shifted vector
+
+    /// <summary>
+    /// svint8_t svinsr[_n_s8](svint8_t op1, int8_t op2)
+    /// </summary>
+    public static unsafe Vector<sbyte> InsertIntoShiftedVector(Vector<sbyte> left, sbyte right) => InsertIntoShiftedVector(left, right);
+
+    /// <summary>
+    /// svint16_t svinsr[_n_s16](svint16_t op1, int16_t op2)
+    /// </summary>
+    public static unsafe Vector<short> InsertIntoShiftedVector(Vector<short> left, short right) => InsertIntoShiftedVector(left, right);
+
+    /// <summary>
+    /// svint32_t svinsr[_n_s32](svint32_t op1, int32_t op2)
+    /// </summary>
+    public static unsafe Vector<int> InsertIntoShiftedVector(Vector<int> left, int right) => InsertIntoShiftedVector(left, right);
+
+    /// <summary>
+    /// svint64_t svinsr[_n_s64](svint64_t op1, int64_t op2)
+    /// </summary>
+    public static unsafe Vector<long> InsertIntoShiftedVector(Vector<long> left, long right) => InsertIntoShiftedVector(left, right);
+
+    /// <summary>
+    /// svuint8_t svinsr[_n_u8](svuint8_t op1, uint8_t op2)
+    /// </summary>
+    public static unsafe Vector<byte> InsertIntoShiftedVector(Vector<byte> left, byte right) => InsertIntoShiftedVector(left, right);
+
+    /// <summary>
+    /// svuint16_t svinsr[_n_u16](svuint16_t op1, uint16_t op2)
+    /// </summary>
+    public static unsafe Vector<ushort> InsertIntoShiftedVector(Vector<ushort> left, ushort right) => InsertIntoShiftedVector(left, right);
+
+    /// <summary>
+    /// svuint32_t svinsr[_n_u32](svuint32_t op1, uint32_t op2)
+    /// </summary>
+    public static unsafe Vector<uint> InsertIntoShiftedVector(Vector<uint> left, uint right) => InsertIntoShiftedVector(left, right);
+
+    /// <summary>
+    /// svuint64_t svinsr[_n_u64](svuint64_t op1, uint64_t op2)
+    /// </summary>
+    public static unsafe Vector<ulong> InsertIntoShiftedVector(Vector<ulong> left, ulong right) => InsertIntoShiftedVector(left, right);
+
+    /// <summary>
+    /// svfloat32_t svinsr[_n_f32](svfloat32_t op1, float32_t op2)
+    /// </summary>
+    public static unsafe Vector<float> InsertIntoShiftedVector(Vector<float> left, float right) => InsertIntoShiftedVector(left, right);
+
+    /// <summary>
+    /// svfloat64_t svinsr[_n_f64](svfloat64_t op1, float64_t op2)
+    /// </summary>
+    public static unsafe Vector<double> InsertIntoShiftedVector(Vector<double> left, double right) => InsertIntoShiftedVector(left, right);
+
+
+    /// LeadingSignCount : Count leading sign bits
+
+    /// <summary>
+    /// svuint8_t svcls[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op)
+    /// svuint8_t svcls[_s8]_x(svbool_t pg, svint8_t op)
+    /// svuint8_t svcls[_s8]_z(svbool_t pg, svint8_t op)
+    /// </summary>
+    public static unsafe Vector<byte> LeadingSignCount(Vector<sbyte> value) => LeadingSignCount(value);
+
+    /// <summary>
+    /// svuint16_t svcls[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op)
+    /// svuint16_t svcls[_s16]_x(svbool_t pg, svint16_t op)
+    /// svuint16_t svcls[_s16]_z(svbool_t pg, svint16_t op)
+    /// </summary>
+    public static unsafe Vector<ushort> LeadingSignCount(Vector<short> value) => LeadingSignCount(value);
+
+    /// <summary>
+    /// svuint32_t svcls[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op)
+    /// svuint32_t svcls[_s32]_x(svbool_t pg, svint32_t op)
+    /// svuint32_t svcls[_s32]_z(svbool_t pg, svint32_t op)
+    /// </summary>
+    public static unsafe Vector<uint> LeadingSignCount(Vector<int> value) => LeadingSignCount(value);
+
+    /// <summary>
+    /// svuint64_t svcls[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op)
+    /// svuint64_t svcls[_s64]_x(svbool_t pg, svint64_t op)
+    /// svuint64_t svcls[_s64]_z(svbool_t pg, svint64_t op)
+    /// </summary>
+    public static unsafe Vector<ulong> LeadingSignCount(Vector<long> value) => LeadingSignCount(value);
+
+
+    /// LeadingZeroCount : Count leading zero bits
+
+    /// <summary>
+    /// svuint8_t svclz[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op)
+    /// svuint8_t svclz[_s8]_x(svbool_t pg, svint8_t op)
+    /// svuint8_t svclz[_s8]_z(svbool_t pg, svint8_t op)
+    /// </summary>
+    public static unsafe Vector<byte> LeadingZeroCount(Vector<sbyte> value) => LeadingZeroCount(value);
+
+    /// <summary>
+    /// svuint8_t svclz[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op)
+    /// svuint8_t svclz[_u8]_x(svbool_t pg, svuint8_t op)
+    /// svuint8_t svclz[_u8]_z(svbool_t pg, svuint8_t op)
+    /// </summary>
+    public static unsafe Vector<byte> LeadingZeroCount(Vector<byte> value) => LeadingZeroCount(value);
+
+    /// <summary>
+    /// svuint16_t svclz[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op)
+    /// svuint16_t svclz[_s16]_x(svbool_t pg, svint16_t op)
+    /// svuint16_t svclz[_s16]_z(svbool_t pg, svint16_t op)
+    /// </summary>
+    public static unsafe Vector<ushort> LeadingZeroCount(Vector<short> value) => LeadingZeroCount(value);
+
+    /// <summary>
+    /// svuint16_t svclz[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op)
+    /// svuint16_t svclz[_u16]_x(svbool_t pg, svuint16_t op)
+    /// svuint16_t svclz[_u16]_z(svbool_t pg, svuint16_t op)
+    /// </summary>
+    public static unsafe Vector<ushort> LeadingZeroCount(Vector<ushort> value) => LeadingZeroCount(value);
+
+    /// <summary>
+    /// svuint32_t svclz[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op)
+    /// svuint32_t svclz[_s32]_x(svbool_t pg, svint32_t op)
+    /// svuint32_t svclz[_s32]_z(svbool_t pg, svint32_t op)
+    /// </summary>
+    public static unsafe Vector<uint> LeadingZeroCount(Vector<int> value) => LeadingZeroCount(value);
+
+    /// <summary>
+    /// svuint32_t svclz[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+    /// svuint32_t svclz[_u32]_x(svbool_t pg, svuint32_t op)
+    /// svuint32_t svclz[_u32]_z(svbool_t pg, svuint32_t op)
+    /// </summary>
+    public static unsafe Vector<uint> LeadingZeroCount(Vector<uint> value) => LeadingZeroCount(value);
+
+    /// <summary>
+    /// svuint64_t svclz[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op)
+    /// svuint64_t svclz[_s64]_x(svbool_t pg, svint64_t op)
+    /// svuint64_t svclz[_s64]_z(svbool_t pg, svint64_t op)
+    /// </summary>
+    public static unsafe Vector<ulong> LeadingZeroCount(Vector<long> value) => LeadingZeroCount(value);
+
+    /// <summary>
+    /// svuint64_t svclz[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op)
+    /// svuint64_t svclz[_u64]_x(svbool_t pg, svuint64_t op)
+    /// svuint64_t svclz[_u64]_z(svbool_t pg, svuint64_t op)
+    /// </summary>
+    public static unsafe Vector<ulong> LeadingZeroCount(Vector<ulong> value) => LeadingZeroCount(value);
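InsertIntoShiftedVector shifts every element up one lane and writes the scalar into the freed element 0, which makes it a natural building block for a shift register of recent samples. An illustrative one-liner (not part of the generated listing):

    // Push a new sample into a rolling window: element 0 becomes `sample`,
    // every other lane takes its lower neighbour's old value, and the
    // highest element falls off the end.
    static Vector<float> Push(Vector<float> window, float sample)
        => Sve.InsertIntoShiftedVector(window, sample);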
+
+
+    /// LoadVector : Unextended load
+
+    /// <summary>
+    /// svint8_t svld1[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<sbyte> LoadVector(Vector<sbyte> mask, sbyte* address) => LoadVector(mask, address);
+
+    /// <summary>
+    /// svint16_t svld1[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVector(Vector<short> mask, short* address) => LoadVector(mask, address);
+
+    /// <summary>
+    /// svint32_t svld1[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVector(Vector<int> mask, int* address) => LoadVector(mask, address);
+
+    /// <summary>
+    /// svint64_t svld1[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVector(Vector<long> mask, long* address) => LoadVector(mask, address);
+
+    /// <summary>
+    /// svuint8_t svld1[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<byte> LoadVector(Vector<byte> mask, byte* address) => LoadVector(mask, address);
+
+    /// <summary>
+    /// svuint16_t svld1[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVector(Vector<ushort> mask, ushort* address) => LoadVector(mask, address);
+
+    /// <summary>
+    /// svuint32_t svld1[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVector(Vector<uint> mask, uint* address) => LoadVector(mask, address);
+
+    /// <summary>
+    /// svuint64_t svld1[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVector(Vector<ulong> mask, ulong* address) => LoadVector(mask, address);
+
+    /// <summary>
+    /// svfloat32_t svld1[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe Vector<float> LoadVector(Vector<float> mask, float* address) => LoadVector(mask, address);
+
+    /// <summary>
+    /// svfloat64_t svld1[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe Vector<double> LoadVector(Vector<double> mask, double* address) => LoadVector(mask, address);
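LoadVector is the basic predicated load: inactive lanes are zeroed and their memory is never touched. A minimal sketch combining it with the Max stubs later in this listing (the mask is again assumed to come from a helper outside this excerpt):

    // Per-lane maximum of two consecutive blocks of a buffer.
    static unsafe Vector<int> MaxOfPair(Vector<int> mask, int* source)
    {
        Vector<int> a = Sve.LoadVector(mask, source);
        Vector<int> b = Sve.LoadVector(mask, source + Vector<int>.Count);
        return Sve.Max(a, b); // see the Max section below
    }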
+
+
+    /// LoadVector128AndReplicateToVector : Load and replicate 128 bits of data
+
+    /// <summary>
+    /// svint8_t svld1rq[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<sbyte> LoadVector128AndReplicateToVector(Vector<sbyte> mask, sbyte* address) => LoadVector128AndReplicateToVector(mask, address);
+
+    /// <summary>
+    /// svint16_t svld1rq[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVector128AndReplicateToVector(Vector<short> mask, short* address) => LoadVector128AndReplicateToVector(mask, address);
+
+    /// <summary>
+    /// svint32_t svld1rq[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVector128AndReplicateToVector(Vector<int> mask, int* address) => LoadVector128AndReplicateToVector(mask, address);
+
+    /// <summary>
+    /// svint64_t svld1rq[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVector128AndReplicateToVector(Vector<long> mask, long* address) => LoadVector128AndReplicateToVector(mask, address);
+
+    /// <summary>
+    /// svuint8_t svld1rq[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<byte> LoadVector128AndReplicateToVector(Vector<byte> mask, byte* address) => LoadVector128AndReplicateToVector(mask, address);
+
+    /// <summary>
+    /// svuint16_t svld1rq[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVector128AndReplicateToVector(Vector<ushort> mask, ushort* address) => LoadVector128AndReplicateToVector(mask, address);
+
+    /// <summary>
+    /// svuint32_t svld1rq[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVector128AndReplicateToVector(Vector<uint> mask, uint* address) => LoadVector128AndReplicateToVector(mask, address);
+
+    /// <summary>
+    /// svuint64_t svld1rq[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVector128AndReplicateToVector(Vector<ulong> mask, ulong* address) => LoadVector128AndReplicateToVector(mask, address);
+
+    /// <summary>
+    /// svfloat32_t svld1rq[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe Vector<float> LoadVector128AndReplicateToVector(Vector<float> mask, float* address) => LoadVector128AndReplicateToVector(mask, address);
+
+    /// <summary>
+    /// svfloat64_t svld1rq[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe Vector<double> LoadVector128AndReplicateToVector(Vector<double> mask, double* address) => LoadVector128AndReplicateToVector(mask, address);
+
+
+    /// LoadVectorByteNonFaultingZeroExtendToInt16 : Load 8-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svint16_t svldnf1ub_s16(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorByteNonFaultingZeroExtendToInt16(byte* address) => LoadVectorByteNonFaultingZeroExtendToInt16(address);
+
+
+    /// LoadVectorByteNonFaultingZeroExtendToInt32 : Load 8-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svint32_t svldnf1ub_s32(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorByteNonFaultingZeroExtendToInt32(byte* address) => LoadVectorByteNonFaultingZeroExtendToInt32(address);
+
+
+    /// LoadVectorByteNonFaultingZeroExtendToInt64 : Load 8-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svint64_t svldnf1ub_s64(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorByteNonFaultingZeroExtendToInt64(byte* address) => LoadVectorByteNonFaultingZeroExtendToInt64(address);
+
+
+    /// LoadVectorByteNonFaultingZeroExtendToUInt16 : Load 8-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svuint16_t svldnf1ub_u16(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorByteNonFaultingZeroExtendToUInt16(byte* address) => LoadVectorByteNonFaultingZeroExtendToUInt16(address);
+
+
+    /// LoadVectorByteNonFaultingZeroExtendToUInt32 : Load 8-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svuint32_t svldnf1ub_u32(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorByteNonFaultingZeroExtendToUInt32(byte* address) => LoadVectorByteNonFaultingZeroExtendToUInt32(address);
+
+
+    /// LoadVectorByteNonFaultingZeroExtendToUInt64 : Load 8-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svuint64_t svldnf1ub_u64(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorByteNonFaultingZeroExtendToUInt64(byte* address) => LoadVectorByteNonFaultingZeroExtendToUInt64(address);
+
+
+    /// LoadVectorByteZeroExtendFirstFaulting : Load 8-bit data and zero-extend, first-faulting
+
+    /// <summary>
+    /// svint16_t svldff1ub_s16(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorByteZeroExtendFirstFaulting(Vector<short> mask, byte* address) => LoadVectorByteZeroExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svint32_t svldff1ub_s32(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorByteZeroExtendFirstFaulting(Vector<int> mask, byte* address) => LoadVectorByteZeroExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svint64_t svldff1ub_s64(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorByteZeroExtendFirstFaulting(Vector<long> mask, byte* address) => LoadVectorByteZeroExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint16_t svldff1ub_u16(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorByteZeroExtendFirstFaulting(Vector<ushort> mask, byte* address) => LoadVectorByteZeroExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint32_t svldff1ub_u32(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorByteZeroExtendFirstFaulting(Vector<uint> mask, byte* address) => LoadVectorByteZeroExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint64_t svldff1ub_u64(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorByteZeroExtendFirstFaulting(Vector<ulong> mask, byte* address) => LoadVectorByteZeroExtendFirstFaulting(mask, address);
+
+
+    /// LoadVectorByteZeroExtendToInt16 : Load 8-bit data and zero-extend
+
+    /// <summary>
+    /// svint16_t svld1ub_s16(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorByteZeroExtendToInt16(Vector<short> mask, byte* address) => LoadVectorByteZeroExtendToInt16(mask, address);
+
+
+    /// LoadVectorByteZeroExtendToInt32 : Load 8-bit data and zero-extend
+
+    /// <summary>
+    /// svint32_t svld1ub_s32(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorByteZeroExtendToInt32(Vector<int> mask, byte* address) => LoadVectorByteZeroExtendToInt32(mask, address);
+
+
+    /// LoadVectorByteZeroExtendToInt64 : Load 8-bit data and zero-extend
+
+    /// <summary>
+    /// svint64_t svld1ub_s64(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorByteZeroExtendToInt64(Vector<long> mask, byte* address) => LoadVectorByteZeroExtendToInt64(mask, address);
+
+
+    /// LoadVectorByteZeroExtendToUInt16 : Load 8-bit data and zero-extend
+
+    /// <summary>
+    /// svuint16_t svld1ub_u16(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorByteZeroExtendToUInt16(Vector<ushort> mask, byte* address) => LoadVectorByteZeroExtendToUInt16(mask, address);
+
+
+    /// LoadVectorByteZeroExtendToUInt32 : Load 8-bit data and zero-extend
+
+    /// <summary>
+    /// svuint32_t svld1ub_u32(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorByteZeroExtendToUInt32(Vector<uint> mask, byte* address) => LoadVectorByteZeroExtendToUInt32(mask, address);
+
+
+    /// LoadVectorByteZeroExtendToUInt64 : Load 8-bit data and zero-extend
+
+    /// <summary>
+    /// svuint64_t svld1ub_u64(svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorByteZeroExtendToUInt64(Vector<ulong> mask, byte* address) => LoadVectorByteZeroExtendToUInt64(mask, address);
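The widening loads above fold a load and a zero-extension into one operation: the lane count is dictated by the destination element type, so the Int32 form reads Vector<int>.Count consecutive bytes. A sketch (the mask is assumed to be supplied by the caller):

    // Load 8-bit pixels directly into zero-extended 32-bit lanes.
    static unsafe Vector<int> LoadPixels(Vector<int> mask, byte* pixels)
        => Sve.LoadVectorByteZeroExtendToInt32(mask, pixels);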
+
+
+    /// LoadVectorFirstFaulting : Unextended load, first-faulting
+
+    /// <summary>
+    /// svint8_t svldff1[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<sbyte> LoadVectorFirstFaulting(Vector<sbyte> mask, sbyte* address) => LoadVectorFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svint16_t svldff1[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorFirstFaulting(Vector<short> mask, short* address) => LoadVectorFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svint32_t svldff1[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorFirstFaulting(Vector<int> mask, int* address) => LoadVectorFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svint64_t svldff1[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorFirstFaulting(Vector<long> mask, long* address) => LoadVectorFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint8_t svldff1[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<byte> LoadVectorFirstFaulting(Vector<byte> mask, byte* address) => LoadVectorFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint16_t svldff1[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorFirstFaulting(Vector<ushort> mask, ushort* address) => LoadVectorFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint32_t svldff1[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorFirstFaulting(Vector<uint> mask, uint* address) => LoadVectorFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint64_t svldff1[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorFirstFaulting(Vector<ulong> mask, ulong* address) => LoadVectorFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svfloat32_t svldff1[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe Vector<float> LoadVectorFirstFaulting(Vector<float> mask, float* address) => LoadVectorFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svfloat64_t svldff1[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe Vector<double> LoadVectorFirstFaulting(Vector<double> mask, double* address) => LoadVectorFirstFaulting(mask, address);
+
+
+    /// LoadVectorInt16NonFaultingSignExtendToInt32 : Load 16-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svint32_t svldnf1sh_s32(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorInt16NonFaultingSignExtendToInt32(short* address) => LoadVectorInt16NonFaultingSignExtendToInt32(address);
+
+
+    /// LoadVectorInt16NonFaultingSignExtendToInt64 : Load 16-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svint64_t svldnf1sh_s64(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorInt16NonFaultingSignExtendToInt64(short* address) => LoadVectorInt16NonFaultingSignExtendToInt64(address);
+
+
+    /// LoadVectorInt16NonFaultingSignExtendToUInt32 : Load 16-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svuint32_t svldnf1sh_u32(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorInt16NonFaultingSignExtendToUInt32(short* address) => LoadVectorInt16NonFaultingSignExtendToUInt32(address);
+
+
+    /// LoadVectorInt16NonFaultingSignExtendToUInt64 : Load 16-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svuint64_t svldnf1sh_u64(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorInt16NonFaultingSignExtendToUInt64(short* address) => LoadVectorInt16NonFaultingSignExtendToUInt64(address);
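The non-faulting sign-extending forms take no mask and suppress faults entirely, which suits speculative reads near a buffer or page boundary; per the FFR sections earlier, lanes that could not be loaded come back inactive and are recorded in the FFR. An illustrative call, not a complete fault-handling loop:

    // Speculatively widen 16-bit samples to 64-bit lanes without risking a fault.
    static unsafe Vector<long> WidenSamples(short* samples)
        => Sve.LoadVectorInt16NonFaultingSignExtendToInt64(samples);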
+
+
+    /// LoadVectorInt16SignExtendFirstFaulting : Load 16-bit data and sign-extend, first-faulting
+
+    /// <summary>
+    /// svint32_t svldff1sh_s32(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorInt16SignExtendFirstFaulting(Vector<int> mask, short* address) => LoadVectorInt16SignExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svint64_t svldff1sh_s64(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorInt16SignExtendFirstFaulting(Vector<long> mask, short* address) => LoadVectorInt16SignExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint32_t svldff1sh_u32(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorInt16SignExtendFirstFaulting(Vector<uint> mask, short* address) => LoadVectorInt16SignExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint64_t svldff1sh_u64(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorInt16SignExtendFirstFaulting(Vector<ulong> mask, short* address) => LoadVectorInt16SignExtendFirstFaulting(mask, address);
+
+
+    /// LoadVectorInt16SignExtendToInt32 : Load 16-bit data and sign-extend
+
+    /// <summary>
+    /// svint32_t svld1sh_s32(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorInt16SignExtendToInt32(Vector<int> mask, short* address) => LoadVectorInt16SignExtendToInt32(mask, address);
+
+
+    /// LoadVectorInt16SignExtendToInt64 : Load 16-bit data and sign-extend
+
+    /// <summary>
+    /// svint64_t svld1sh_s64(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorInt16SignExtendToInt64(Vector<long> mask, short* address) => LoadVectorInt16SignExtendToInt64(mask, address);
+
+
+    /// LoadVectorInt16SignExtendToUInt32 : Load 16-bit data and sign-extend
+
+    /// <summary>
+    /// svuint32_t svld1sh_u32(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorInt16SignExtendToUInt32(Vector<uint> mask, short* address) => LoadVectorInt16SignExtendToUInt32(mask, address);
+
+
+    /// LoadVectorInt16SignExtendToUInt64 : Load 16-bit data and sign-extend
+
+    /// <summary>
+    /// svuint64_t svld1sh_u64(svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorInt16SignExtendToUInt64(Vector<ulong> mask, short* address) => LoadVectorInt16SignExtendToUInt64(mask, address);
+
+
+    /// LoadVectorInt32NonFaultingSignExtendToInt64 : Load 32-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svint64_t svldnf1sw_s64(svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorInt32NonFaultingSignExtendToInt64(int* address) => LoadVectorInt32NonFaultingSignExtendToInt64(address);
+
+
+    /// LoadVectorInt32NonFaultingSignExtendToUInt64 : Load 32-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svuint64_t svldnf1sw_u64(svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorInt32NonFaultingSignExtendToUInt64(int* address) => LoadVectorInt32NonFaultingSignExtendToUInt64(address);
+
+
+    /// LoadVectorInt32SignExtendFirstFaulting : Load 32-bit data and sign-extend, first-faulting
+
+    /// <summary>
+    /// svint64_t svldff1sw_s64(svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorInt32SignExtendFirstFaulting(Vector<long> mask, int* address) => LoadVectorInt32SignExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint64_t svldff1sw_u64(svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorInt32SignExtendFirstFaulting(Vector<ulong> mask, int* address) => LoadVectorInt32SignExtendFirstFaulting(mask, address);
+
+
+    /// LoadVectorInt32SignExtendToInt64 : Load 32-bit data and sign-extend
+
+    /// <summary>
+    /// svint64_t svld1sw_s64(svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorInt32SignExtendToInt64(Vector<long> mask, int* address) => LoadVectorInt32SignExtendToInt64(mask, address);
+
+
+    /// LoadVectorInt32SignExtendToUInt64 : Load 32-bit data and sign-extend
+
+    /// <summary>
+    /// svuint64_t svld1sw_u64(svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorInt32SignExtendToUInt64(Vector<ulong> mask, int* address) => LoadVectorInt32SignExtendToUInt64(mask, address);
+
+
+    /// LoadVectorNonFaulting : Unextended load, non-faulting
+
+    /// <summary>
+    /// svint8_t svldnf1[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<sbyte> LoadVectorNonFaulting(sbyte* address) => LoadVectorNonFaulting(address);
+
+    /// <summary>
+    /// svint16_t svldnf1[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorNonFaulting(short* address) => LoadVectorNonFaulting(address);
+
+    /// <summary>
+    /// svint32_t svldnf1[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorNonFaulting(int* address) => LoadVectorNonFaulting(address);
+
+    /// <summary>
+    /// svint64_t svldnf1[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorNonFaulting(long* address) => LoadVectorNonFaulting(address);
+
+    /// <summary>
+    /// svuint8_t svldnf1[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<byte> LoadVectorNonFaulting(byte* address) => LoadVectorNonFaulting(address);
+
+    /// <summary>
+    /// svuint16_t svldnf1[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorNonFaulting(ushort* address) => LoadVectorNonFaulting(address);
+
+    /// <summary>
+    /// svuint32_t svldnf1[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorNonFaulting(uint* address) => LoadVectorNonFaulting(address);
+
+    /// <summary>
+    /// svuint64_t svldnf1[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorNonFaulting(ulong* address) => LoadVectorNonFaulting(address);
+
+    /// <summary>
+    /// svfloat32_t svldnf1[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe Vector<float> LoadVectorNonFaulting(float* address) => LoadVectorNonFaulting(address);
+
+    /// <summary>
+    /// svfloat64_t svldnf1[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe Vector<double> LoadVectorNonFaulting(double* address) => LoadVectorNonFaulting(address);
+
+
+    /// LoadVectorNonTemporal : Unextended load, non-temporal
+
+    /// <summary>
+    /// svint8_t svldnt1[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<sbyte> LoadVectorNonTemporal(Vector<sbyte> mask, sbyte* address) => LoadVectorNonTemporal(mask, address);
+
+    /// <summary>
+    /// svint16_t svldnt1[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorNonTemporal(Vector<short> mask, short* address) => LoadVectorNonTemporal(mask, address);
+
+    /// <summary>
+    /// svint32_t svldnt1[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorNonTemporal(Vector<int> mask, int* address) => LoadVectorNonTemporal(mask, address);
+
+    /// <summary>
+    /// svint64_t svldnt1[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorNonTemporal(Vector<long> mask, long* address) => LoadVectorNonTemporal(mask, address);
+
+    /// <summary>
+    /// svuint8_t svldnt1[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe Vector<byte> LoadVectorNonTemporal(Vector<byte> mask, byte* address) => LoadVectorNonTemporal(mask, address);
+
+    /// <summary>
+    /// svuint16_t svldnt1[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorNonTemporal(Vector<ushort> mask, ushort* address) => LoadVectorNonTemporal(mask, address);
+
+    /// <summary>
+    /// svuint32_t svldnt1[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorNonTemporal(Vector<uint> mask, uint* address) => LoadVectorNonTemporal(mask, address);
+
+    /// <summary>
+    /// svuint64_t svldnt1[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorNonTemporal(Vector<ulong> mask, ulong* address) => LoadVectorNonTemporal(mask, address);
+
+    /// <summary>
+    /// svfloat32_t svldnt1[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe Vector<float> LoadVectorNonTemporal(Vector<float> mask, float* address) => LoadVectorNonTemporal(mask, address);
+
+    /// <summary>
+    /// svfloat64_t svldnt1[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe Vector<double> LoadVectorNonTemporal(Vector<double> mask, double* address) => LoadVectorNonTemporal(mask, address);
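LoadVectorNonTemporal hints that the data will not be reused soon, so the hardware may bypass the caches. A streaming-copy sketch under that assumption (Store is declared in this file's store templates; the tail-masking helper lies outside this excerpt):

    // One step of a cache-bypassing copy.
    static unsafe void CopyBlock(Vector<float> mask, float* source, float* destination)
    {
        Vector<float> v = Sve.LoadVectorNonTemporal(mask, source);
        Sve.Store(mask, destination, v);
    }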
+
+
+    /// LoadVectorSByteNonFaultingSignExtendToInt16 : Load 8-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svint16_t svldnf1sb_s16(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorSByteNonFaultingSignExtendToInt16(sbyte* address) => LoadVectorSByteNonFaultingSignExtendToInt16(address);
+
+
+    /// LoadVectorSByteNonFaultingSignExtendToInt32 : Load 8-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svint32_t svldnf1sb_s32(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorSByteNonFaultingSignExtendToInt32(sbyte* address) => LoadVectorSByteNonFaultingSignExtendToInt32(address);
+
+
+    /// LoadVectorSByteNonFaultingSignExtendToInt64 : Load 8-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svint64_t svldnf1sb_s64(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorSByteNonFaultingSignExtendToInt64(sbyte* address) => LoadVectorSByteNonFaultingSignExtendToInt64(address);
+
+
+    /// LoadVectorSByteNonFaultingSignExtendToUInt16 : Load 8-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svuint16_t svldnf1sb_u16(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorSByteNonFaultingSignExtendToUInt16(sbyte* address) => LoadVectorSByteNonFaultingSignExtendToUInt16(address);
+
+
+    /// LoadVectorSByteNonFaultingSignExtendToUInt32 : Load 8-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svuint32_t svldnf1sb_u32(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorSByteNonFaultingSignExtendToUInt32(sbyte* address) => LoadVectorSByteNonFaultingSignExtendToUInt32(address);
+
+
+    /// LoadVectorSByteNonFaultingSignExtendToUInt64 : Load 8-bit data and sign-extend, non-faulting
+
+    /// <summary>
+    /// svuint64_t svldnf1sb_u64(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorSByteNonFaultingSignExtendToUInt64(sbyte* address) => LoadVectorSByteNonFaultingSignExtendToUInt64(address);
+
+
+    /// LoadVectorSByteSignExtendFirstFaulting : Load 8-bit data and sign-extend, first-faulting
+
+    /// <summary>
+    /// svint16_t svldff1sb_s16(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorSByteSignExtendFirstFaulting(Vector<short> mask, sbyte* address) => LoadVectorSByteSignExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svint32_t svldff1sb_s32(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorSByteSignExtendFirstFaulting(Vector<int> mask, sbyte* address) => LoadVectorSByteSignExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svint64_t svldff1sb_s64(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorSByteSignExtendFirstFaulting(Vector<long> mask, sbyte* address) => LoadVectorSByteSignExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint16_t svldff1sb_u16(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorSByteSignExtendFirstFaulting(Vector<ushort> mask, sbyte* address) => LoadVectorSByteSignExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint32_t svldff1sb_u32(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorSByteSignExtendFirstFaulting(Vector<uint> mask, sbyte* address) => LoadVectorSByteSignExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint64_t svldff1sb_u64(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorSByteSignExtendFirstFaulting(Vector<ulong> mask, sbyte* address) => LoadVectorSByteSignExtendFirstFaulting(mask, address);
+
+
+    /// LoadVectorSByteSignExtendToInt16 : Load 8-bit data and sign-extend
+
+    /// <summary>
+    /// svint16_t svld1sb_s16(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<short> LoadVectorSByteSignExtendToInt16(Vector<short> mask, sbyte* address) => LoadVectorSByteSignExtendToInt16(mask, address);
+
+
+    /// LoadVectorSByteSignExtendToInt32 : Load 8-bit data and sign-extend
+
+    /// <summary>
+    /// svint32_t svld1sb_s32(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorSByteSignExtendToInt32(Vector<int> mask, sbyte* address) => LoadVectorSByteSignExtendToInt32(mask, address);
+
+
+    /// LoadVectorSByteSignExtendToInt64 : Load 8-bit data and sign-extend
+
+    /// <summary>
+    /// svint64_t svld1sb_s64(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorSByteSignExtendToInt64(Vector<long> mask, sbyte* address) => LoadVectorSByteSignExtendToInt64(mask, address);
+
+
+    /// LoadVectorSByteSignExtendToUInt16 : Load 8-bit data and sign-extend
+
+    /// <summary>
+    /// svuint16_t svld1sb_u16(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<ushort> LoadVectorSByteSignExtendToUInt16(Vector<ushort> mask, sbyte* address) => LoadVectorSByteSignExtendToUInt16(mask, address);
+
+
+    /// LoadVectorSByteSignExtendToUInt32 : Load 8-bit data and sign-extend
+
+    /// <summary>
+    /// svuint32_t svld1sb_u32(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorSByteSignExtendToUInt32(Vector<uint> mask, sbyte* address) => LoadVectorSByteSignExtendToUInt32(mask, address);
+
+
+    /// LoadVectorSByteSignExtendToUInt64 : Load 8-bit data and sign-extend
+
+    /// <summary>
+    /// svuint64_t svld1sb_u64(svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorSByteSignExtendToUInt64(Vector<ulong> mask, sbyte* address) => LoadVectorSByteSignExtendToUInt64(mask, address);
+
+
+    /// LoadVectorUInt16NonFaultingZeroExtendToInt32 : Load 16-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svint32_t svldnf1uh_s32(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorUInt16NonFaultingZeroExtendToInt32(ushort* address) => LoadVectorUInt16NonFaultingZeroExtendToInt32(address);
+
+
+    /// LoadVectorUInt16NonFaultingZeroExtendToInt64 : Load 16-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svint64_t svldnf1uh_s64(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorUInt16NonFaultingZeroExtendToInt64(ushort* address) => LoadVectorUInt16NonFaultingZeroExtendToInt64(address);
+
+
+    /// LoadVectorUInt16NonFaultingZeroExtendToUInt32 : Load 16-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svuint32_t svldnf1uh_u32(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorUInt16NonFaultingZeroExtendToUInt32(ushort* address) => LoadVectorUInt16NonFaultingZeroExtendToUInt32(address);
+
+
+    /// LoadVectorUInt16NonFaultingZeroExtendToUInt64 : Load 16-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svuint64_t svldnf1uh_u64(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorUInt16NonFaultingZeroExtendToUInt64(ushort* address) => LoadVectorUInt16NonFaultingZeroExtendToUInt64(address);
+
+
+    /// LoadVectorUInt16ZeroExtendFirstFaulting : Load 16-bit data and zero-extend, first-faulting
+
+    /// <summary>
+    /// svint32_t svldff1uh_s32(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorUInt16ZeroExtendFirstFaulting(Vector<int> mask, ushort* address) => LoadVectorUInt16ZeroExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svint64_t svldff1uh_s64(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorUInt16ZeroExtendFirstFaulting(Vector<long> mask, ushort* address) => LoadVectorUInt16ZeroExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint32_t svldff1uh_u32(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorUInt16ZeroExtendFirstFaulting(Vector<uint> mask, ushort* address) => LoadVectorUInt16ZeroExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint64_t svldff1uh_u64(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorUInt16ZeroExtendFirstFaulting(Vector<ulong> mask, ushort* address) => LoadVectorUInt16ZeroExtendFirstFaulting(mask, address);
+
+
+    /// LoadVectorUInt16ZeroExtendToInt32 : Load 16-bit data and zero-extend
+
+    /// <summary>
+    /// svint32_t svld1uh_s32(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<int> LoadVectorUInt16ZeroExtendToInt32(Vector<int> mask, ushort* address) => LoadVectorUInt16ZeroExtendToInt32(mask, address);
+
+
+    /// LoadVectorUInt16ZeroExtendToInt64 : Load 16-bit data and zero-extend
+
+    /// <summary>
+    /// svint64_t svld1uh_s64(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorUInt16ZeroExtendToInt64(Vector<long> mask, ushort* address) => LoadVectorUInt16ZeroExtendToInt64(mask, address);
+
+
+    /// LoadVectorUInt16ZeroExtendToUInt32 : Load 16-bit data and zero-extend
+
+    /// <summary>
+    /// svuint32_t svld1uh_u32(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<uint> LoadVectorUInt16ZeroExtendToUInt32(Vector<uint> mask, ushort* address) => LoadVectorUInt16ZeroExtendToUInt32(mask, address);
+
+
+    /// LoadVectorUInt16ZeroExtendToUInt64 : Load 16-bit data and zero-extend
+
+    /// <summary>
+    /// svuint64_t svld1uh_u64(svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorUInt16ZeroExtendToUInt64(Vector<ulong> mask, ushort* address) => LoadVectorUInt16ZeroExtendToUInt64(mask, address);
+
+
+    /// LoadVectorUInt32NonFaultingZeroExtendToInt64 : Load 32-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svint64_t svldnf1uw_s64(svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorUInt32NonFaultingZeroExtendToInt64(uint* address) => LoadVectorUInt32NonFaultingZeroExtendToInt64(address);
+
+
+    /// LoadVectorUInt32NonFaultingZeroExtendToUInt64 : Load 32-bit data and zero-extend, non-faulting
+
+    /// <summary>
+    /// svuint64_t svldnf1uw_u64(svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorUInt32NonFaultingZeroExtendToUInt64(uint* address) => LoadVectorUInt32NonFaultingZeroExtendToUInt64(address);
+
+
+    /// LoadVectorUInt32ZeroExtendFirstFaulting : Load 32-bit data and zero-extend, first-faulting
+
+    /// <summary>
+    /// svint64_t svldff1uw_s64(svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorUInt32ZeroExtendFirstFaulting(Vector<long> mask, uint* address) => LoadVectorUInt32ZeroExtendFirstFaulting(mask, address);
+
+    /// <summary>
+    /// svuint64_t svldff1uw_u64(svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorUInt32ZeroExtendFirstFaulting(Vector<ulong> mask, uint* address) => LoadVectorUInt32ZeroExtendFirstFaulting(mask, address);
+
+
+    /// LoadVectorUInt32ZeroExtendToInt64 : Load 32-bit data and zero-extend
+
+    /// <summary>
+    /// svint64_t svld1uw_s64(svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<long> LoadVectorUInt32ZeroExtendToInt64(Vector<long> mask, uint* address) => LoadVectorUInt32ZeroExtendToInt64(mask, address);
+
+
+    /// LoadVectorUInt32ZeroExtendToUInt64 : Load 32-bit data and zero-extend
+
+    /// <summary>
+    /// svuint64_t svld1uw_u64(svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe Vector<ulong> LoadVectorUInt32ZeroExtendToUInt64(Vector<ulong> mask, uint* address) => LoadVectorUInt32ZeroExtendToUInt64(mask, address);
+
+
+    /// LoadVectorx2 : Load two-element tuples into two vectors
+
+    /// <summary>
+    /// svint8x2_t svld2[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe (Vector<sbyte>, Vector<sbyte>) LoadVectorx2(Vector<sbyte> mask, sbyte* address) => LoadVectorx2(mask, address);
+
+    /// <summary>
+    /// svint16x2_t svld2[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe (Vector<short>, Vector<short>) LoadVectorx2(Vector<short> mask, short* address) => LoadVectorx2(mask, address);
+
+    /// <summary>
+    /// svint32x2_t svld2[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe (Vector<int>, Vector<int>) LoadVectorx2(Vector<int> mask, int* address) => LoadVectorx2(mask, address);
+
+    /// <summary>
+    /// svint64x2_t svld2[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe (Vector<long>, Vector<long>) LoadVectorx2(Vector<long> mask, long* address) => LoadVectorx2(mask, address);
+
+    /// <summary>
+    /// svuint8x2_t svld2[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe (Vector<byte>, Vector<byte>) LoadVectorx2(Vector<byte> mask, byte* address) => LoadVectorx2(mask, address);
+
+    /// <summary>
+    /// svuint16x2_t svld2[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe (Vector<ushort>, Vector<ushort>) LoadVectorx2(Vector<ushort> mask, ushort* address) => LoadVectorx2(mask, address);
+
+    /// <summary>
+    /// svuint32x2_t svld2[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe (Vector<uint>, Vector<uint>) LoadVectorx2(Vector<uint> mask, uint* address) => LoadVectorx2(mask, address);
+
+    /// <summary>
+    /// svuint64x2_t svld2[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe (Vector<ulong>, Vector<ulong>) LoadVectorx2(Vector<ulong> mask, ulong* address) => LoadVectorx2(mask, address);
+
+    /// <summary>
+    /// svfloat32x2_t svld2[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe (Vector<float>, Vector<float>) LoadVectorx2(Vector<float> mask, float* address) => LoadVectorx2(mask, address);
+
+    /// <summary>
+    /// svfloat64x2_t svld2[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe (Vector<double>, Vector<double>) LoadVectorx2(Vector<double> mask, double* address) => LoadVectorx2(mask, address);
+
+
+    /// LoadVectorx3 : Load three-element tuples into three vectors
+
+    /// <summary>
+    /// svint8x3_t svld3[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe (Vector<sbyte>, Vector<sbyte>, Vector<sbyte>) LoadVectorx3(Vector<sbyte> mask, sbyte* address) => LoadVectorx3(mask, address);
+
+    /// <summary>
+    /// svint16x3_t svld3[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe (Vector<short>, Vector<short>, Vector<short>) LoadVectorx3(Vector<short> mask, short* address) => LoadVectorx3(mask, address);
+
+    /// <summary>
+    /// svint32x3_t svld3[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe (Vector<int>, Vector<int>, Vector<int>) LoadVectorx3(Vector<int> mask, int* address) => LoadVectorx3(mask, address);
+
+    /// <summary>
+    /// svint64x3_t svld3[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe (Vector<long>, Vector<long>, Vector<long>) LoadVectorx3(Vector<long> mask, long* address) => LoadVectorx3(mask, address);
+
+    /// <summary>
+    /// svuint8x3_t svld3[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe (Vector<byte>, Vector<byte>, Vector<byte>) LoadVectorx3(Vector<byte> mask, byte* address) => LoadVectorx3(mask, address);
+
+    /// <summary>
+    /// svuint16x3_t svld3[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe (Vector<ushort>, Vector<ushort>, Vector<ushort>) LoadVectorx3(Vector<ushort> mask, ushort* address) => LoadVectorx3(mask, address);
+
+    /// <summary>
+    /// svuint32x3_t svld3[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe (Vector<uint>, Vector<uint>, Vector<uint>) LoadVectorx3(Vector<uint> mask, uint* address) => LoadVectorx3(mask, address);
+
+    /// <summary>
+    /// svuint64x3_t svld3[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe (Vector<ulong>, Vector<ulong>, Vector<ulong>) LoadVectorx3(Vector<ulong> mask, ulong* address) => LoadVectorx3(mask, address);
+
+    /// <summary>
+    /// svfloat32x3_t svld3[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe (Vector<float>, Vector<float>, Vector<float>) LoadVectorx3(Vector<float> mask, float* address) => LoadVectorx3(mask, address);
+
+    /// <summary>
+    /// svfloat64x3_t svld3[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe (Vector<double>, Vector<double>, Vector<double>) LoadVectorx3(Vector<double> mask, double* address) => LoadVectorx3(mask, address);
+
+
+    /// LoadVectorx4 : Load four-element tuples into four vectors
+
+    /// <summary>
+    /// svint8x4_t svld4[_s8](svbool_t pg, const int8_t *base)
+    /// </summary>
+    public static unsafe (Vector<sbyte>, Vector<sbyte>, Vector<sbyte>, Vector<sbyte>) LoadVectorx4(Vector<sbyte> mask, sbyte* address) => LoadVectorx4(mask, address);
+
+    /// <summary>
+    /// svint16x4_t svld4[_s16](svbool_t pg, const int16_t *base)
+    /// </summary>
+    public static unsafe (Vector<short>, Vector<short>, Vector<short>, Vector<short>) LoadVectorx4(Vector<short> mask, short* address) => LoadVectorx4(mask, address);
+
+    /// <summary>
+    /// svint32x4_t svld4[_s32](svbool_t pg, const int32_t *base)
+    /// </summary>
+    public static unsafe (Vector<int>, Vector<int>, Vector<int>, Vector<int>) LoadVectorx4(Vector<int> mask, int* address) => LoadVectorx4(mask, address);
+
+    /// <summary>
+    /// svint64x4_t svld4[_s64](svbool_t pg, const int64_t *base)
+    /// </summary>
+    public static unsafe (Vector<long>, Vector<long>, Vector<long>, Vector<long>) LoadVectorx4(Vector<long> mask, long* address) => LoadVectorx4(mask, address);
+
+    /// <summary>
+    /// svuint8x4_t svld4[_u8](svbool_t pg, const uint8_t *base)
+    /// </summary>
+    public static unsafe (Vector<byte>, Vector<byte>, Vector<byte>, Vector<byte>) LoadVectorx4(Vector<byte> mask, byte* address) => LoadVectorx4(mask, address);
+
+    /// <summary>
+    /// svuint16x4_t svld4[_u16](svbool_t pg, const uint16_t *base)
+    /// </summary>
+    public static unsafe (Vector<ushort>, Vector<ushort>, Vector<ushort>, Vector<ushort>) LoadVectorx4(Vector<ushort> mask, ushort* address) => LoadVectorx4(mask, address);
+
+    /// <summary>
+    /// svuint32x4_t svld4[_u32](svbool_t pg, const uint32_t *base)
+    /// </summary>
+    public static unsafe (Vector<uint>, Vector<uint>, Vector<uint>, Vector<uint>) LoadVectorx4(Vector<uint> mask, uint* address) => LoadVectorx4(mask, address);
+
+    /// <summary>
+    /// svuint64x4_t svld4[_u64](svbool_t pg, const uint64_t *base)
+    /// </summary>
+    public static unsafe (Vector<ulong>, Vector<ulong>, Vector<ulong>, Vector<ulong>) LoadVectorx4(Vector<ulong> mask, ulong* address) => LoadVectorx4(mask, address);
+
+    /// <summary>
+    /// svfloat32x4_t svld4[_f32](svbool_t pg, const float32_t *base)
+    /// </summary>
+    public static unsafe (Vector<float>, Vector<float>, Vector<float>, Vector<float>) LoadVectorx4(Vector<float> mask, float* address) => LoadVectorx4(mask, address);
+
+    /// <summary>
+    /// svfloat64x4_t svld4[_f64](svbool_t pg, const float64_t *base)
+    /// </summary>
+    public static unsafe (Vector<double>, Vector<double>, Vector<double>, Vector<double>) LoadVectorx4(Vector<double> mask, double* address) => LoadVectorx4(mask, address);
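The x2/x3/x4 loads de-interleave structure-of-arrays data as they load: svld3 on packed RGB bytes, for instance, yields one vector per channel. A sketch:

    // Split interleaved RGB bytes into three planar vectors.
    static unsafe (Vector<byte> R, Vector<byte> G, Vector<byte> B) SplitRgb(Vector<byte> mask, byte* rgb)
        => Sve.LoadVectorx3(mask, rgb);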
+
+
+    /// Max : Maximum
+
+    /// <summary>
+    /// svint8_t svmax[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// svint8_t svmax[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// svint8_t svmax[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// </summary>
+    public static unsafe Vector<sbyte> Max(Vector<sbyte> left, Vector<sbyte> right) => Max(left, right);
+
+    /// <summary>
+    /// svint16_t svmax[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// svint16_t svmax[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// svint16_t svmax[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// </summary>
+    public static unsafe Vector<short> Max(Vector<short> left, Vector<short> right) => Max(left, right);
+
+    /// <summary>
+    /// svint32_t svmax[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// svint32_t svmax[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// svint32_t svmax[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// </summary>
+    public static unsafe Vector<int> Max(Vector<int> left, Vector<int> right) => Max(left, right);
+
+    /// <summary>
+    /// svint64_t svmax[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// svint64_t svmax[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// svint64_t svmax[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// </summary>
+    public static unsafe Vector<long> Max(Vector<long> left, Vector<long> right) => Max(left, right);
+
+    /// <summary>
+    /// svuint8_t svmax[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// svuint8_t svmax[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// svuint8_t svmax[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// </summary>
+    public static unsafe Vector<byte> Max(Vector<byte> left, Vector<byte> right) => Max(left, right);
+
+    /// <summary>
+    /// svuint16_t svmax[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// svuint16_t svmax[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// svuint16_t svmax[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// </summary>
+    public static unsafe Vector<ushort> Max(Vector<ushort> left, Vector<ushort> right) => Max(left, right);
+
+    /// <summary>
+    /// svuint32_t svmax[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// svuint32_t svmax[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// svuint32_t svmax[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// </summary>
+    public static unsafe Vector<uint> Max(Vector<uint> left, Vector<uint> right) => Max(left, right);
+
+    /// <summary>
+    /// svuint64_t svmax[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// svuint64_t svmax[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// svuint64_t svmax[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// </summary>
+    public static unsafe Vector<ulong> Max(Vector<ulong> left, Vector<ulong> right) => Max(left, right);
+
+    /// <summary>
+    /// svfloat32_t svmax[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svmax[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svmax[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// </summary>
+    public static unsafe Vector<float> Max(Vector<float> left, Vector<float> right) => Max(left, right);
+
+    /// <summary>
+    /// svfloat64_t svmax[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svmax[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svmax[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// </summary>
+    public static unsafe Vector<double> Max(Vector<double> left, Vector<double> right) => Max(left, right);
+
+
+    /// MaxAcross : Maximum reduction to scalar
+
+    /// <summary>
+    /// int8_t svmaxv[_s8](svbool_t pg, svint8_t op)
+    /// </summary>
+    public static unsafe Vector<sbyte> MaxAcross(Vector<sbyte> value) => MaxAcross(value);
+
+    /// <summary>
+    /// int16_t svmaxv[_s16](svbool_t pg, svint16_t op)
+    /// </summary>
+    public static unsafe Vector<short> MaxAcross(Vector<short> value) => MaxAcross(value);
+
+    /// <summary>
+    /// int32_t svmaxv[_s32](svbool_t pg, svint32_t op)
+    /// </summary>
+    public static unsafe Vector<int> MaxAcross(Vector<int> value) => MaxAcross(value);
+
+    /// <summary>
+    /// int64_t svmaxv[_s64](svbool_t pg, svint64_t op)
+    /// </summary>
+    public static unsafe Vector<long> MaxAcross(Vector<long> value) => MaxAcross(value);
+
+    /// <summary>
+    /// uint8_t svmaxv[_u8](svbool_t pg, svuint8_t op)
+    /// </summary>
+    public static unsafe Vector<byte> MaxAcross(Vector<byte> value) => MaxAcross(value);
+
+    /// <summary>
+    /// uint16_t svmaxv[_u16](svbool_t pg, svuint16_t op)
+    /// </summary>
+    public static unsafe Vector<ushort> MaxAcross(Vector<ushort> value) => MaxAcross(value);
+
+    /// <summary>
+    /// uint32_t svmaxv[_u32](svbool_t pg, svuint32_t op)
+    /// </summary>
+    public static unsafe Vector<uint> MaxAcross(Vector<uint> value) => MaxAcross(value);
+
+    /// <summary>
+    /// uint64_t svmaxv[_u64](svbool_t pg, svuint64_t op)
+    /// </summary>
+    public static unsafe Vector<ulong> MaxAcross(Vector<ulong> value) => MaxAcross(value);
+
+    /// <summary>
+    /// float32_t svmaxv[_f32](svbool_t pg, svfloat32_t op)
+    /// </summary>
+    public static unsafe Vector<float> MaxAcross(Vector<float> value) => MaxAcross(value);
+
+    /// <summary>
+    /// float64_t svmaxv[_f64](svbool_t pg, svfloat64_t op)
+    /// </summary>
+    public static unsafe Vector<double> MaxAcross(Vector<double> value) => MaxAcross(value);
+
+
+    /// MaxNumber : Maximum number
+
+    /// <summary>
+    /// svfloat32_t svmaxnm[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svmaxnm[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svmaxnm[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// </summary>
+    public static unsafe Vector<float> MaxNumber(Vector<float> left, Vector<float> right) => MaxNumber(left, right);
+
+    /// <summary>
+    /// svfloat64_t svmaxnm[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svmaxnm[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svmaxnm[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// </summary>
+    public static unsafe Vector<double> MaxNumber(Vector<double> left, Vector<double> right) => MaxNumber(left, right);
+
+
+    /// MaxNumberAcross : Maximum number reduction to scalar
+
+    /// <summary>
+    /// float32_t svmaxnmv[_f32](svbool_t pg, svfloat32_t op)
+    /// </summary>
+    public static unsafe Vector<float> MaxNumberAcross(Vector<float> value) => MaxNumberAcross(value);
+
+    /// <summary>
+    /// float64_t svmaxnmv[_f64](svbool_t pg, svfloat64_t op)
+    /// </summary>
+    public static unsafe Vector<double> MaxNumberAcross(Vector<double> value) => MaxNumberAcross(value);
+
+
+    /// Min : Minimum
+
+    /// <summary>
+    /// svint8_t svmin[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// svint8_t svmin[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// svint8_t svmin[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// </summary>
+    public static unsafe Vector<sbyte> Min(Vector<sbyte> left, Vector<sbyte> right) => Min(left, right);
+
+    /// <summary>
+    /// svint16_t svmin[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// svint16_t svmin[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// svint16_t svmin[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// </summary>
+    public static unsafe Vector<short> Min(Vector<short> left, Vector<short> right) => Min(left, right);
+
+    /// <summary>
+    /// svint32_t svmin[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// svint32_t svmin[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// svint32_t svmin[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// </summary>
+    public static unsafe Vector<int> Min(Vector<int> left, Vector<int> right) => Min(left, right);
+
+    /// <summary>
+    /// svint64_t svmin[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// svint64_t svmin[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// svint64_t svmin[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// </summary>
+    public static unsafe Vector<long> Min(Vector<long> left, Vector<long> right) => Min(left, right);
+
+    /// <summary>
+    /// svuint8_t svmin[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// svuint8_t svmin[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// svuint8_t svmin[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// </summary>
+    public static unsafe Vector<byte> Min(Vector<byte> left, Vector<byte> right) => Min(left, right);
+
+    /// <summary>
+    /// svuint16_t svmin[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// svuint16_t svmin[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// svuint16_t svmin[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// </summary>
+    public static unsafe Vector<ushort> Min(Vector<ushort> left, Vector<ushort> right) => Min(left, right);
+
+    /// <summary>
+    /// svuint32_t svmin[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// svuint32_t svmin[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// svuint32_t svmin[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// </summary>
+    public static unsafe Vector<uint> Min(Vector<uint> left, Vector<uint> right) => Min(left, right);
+
+    /// <summary>
+    /// svuint64_t svmin[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// svuint64_t svmin[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// svuint64_t svmin[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// </summary>
+    public static unsafe Vector<ulong> Min(Vector<ulong> left, Vector<ulong> right) => Min(left, right);
+
+    /// <summary>
+    /// svfloat32_t svmin[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svmin[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svmin[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// </summary>
+    public static unsafe Vector<float> Min(Vector<float> left, Vector<float> right) => Min(left, right);
+
+    /// <summary>
+    /// svfloat64_t svmin[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svmin[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svmin[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// </summary>
+    public static unsafe Vector<double> Min(Vector<double> left, Vector<double> right) => Min(left, right);
+
+
+    /// MinAcross : Minimum reduction to scalar
+
+    /// <summary>
+    /// int8_t svminv[_s8](svbool_t pg, svint8_t op)
+    /// </summary>
+    public static unsafe Vector<sbyte> MinAcross(Vector<sbyte> value) => MinAcross(value);
+
+    /// <summary>
+    /// int16_t svminv[_s16](svbool_t pg, svint16_t op)
+    /// </summary>
+    public static unsafe Vector<short> MinAcross(Vector<short> value) => MinAcross(value);
+
+    /// <summary>
+    /// int32_t svminv[_s32](svbool_t pg, svint32_t op)
+    /// </summary>
+    public static unsafe Vector<int> MinAcross(Vector<int> value) => MinAcross(value);
+
+    /// <summary>
+    /// int64_t svminv[_s64](svbool_t pg, svint64_t op)
+    /// </summary>
+    public static unsafe Vector<long> MinAcross(Vector<long> value) => MinAcross(value);
+
+    /// <summary>
+    /// uint8_t svminv[_u8](svbool_t pg, svuint8_t op)
+    /// </summary>
+    public static unsafe Vector<byte> MinAcross(Vector<byte> value) => MinAcross(value);
+
+    /// <summary>
+    /// uint16_t svminv[_u16](svbool_t pg, svuint16_t op)
+    /// </summary>
+    public static unsafe Vector<ushort> MinAcross(Vector<ushort> value) => MinAcross(value);
+
+    /// <summary>
+    /// uint32_t svminv[_u32](svbool_t pg, svuint32_t op)
+    /// </summary>
+    public static unsafe Vector<uint> MinAcross(Vector<uint> value) => MinAcross(value);
+
+    /// <summary>
+    /// uint64_t svminv[_u64](svbool_t pg, svuint64_t op)
+    /// </summary>
+    public static unsafe Vector<ulong> MinAcross(Vector<ulong> value) => MinAcross(value);
+
+    /// <summary>
+    /// float32_t svminv[_f32](svbool_t pg, svfloat32_t op)
+    /// </summary>
+    public static unsafe Vector<float> MinAcross(Vector<float> value) => MinAcross(value);
+
+    /// <summary>
+    /// float64_t svminv[_f64](svbool_t pg, svfloat64_t op)
+    /// </summary>
+    public static unsafe Vector<double> MinAcross(Vector<double> value) => MinAcross(value);
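Per this listing, the Across reductions return a Vector with the scalar result in element 0 rather than a bare scalar. A sketch combining the per-lane and horizontal minimum (the element indexer comes from System.Numerics.Vector<T>):

    // Overall minimum of two vectors.
    static float MinOfBoth(Vector<float> x, Vector<float> y)
    {
        Vector<float> perLane = Sve.Min(x, y);
        return Sve.MinAcross(perLane)[0]; // result lives in element 0
    }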
+
+
+    /// MinNumber : Minimum number
+
+    /// <summary>
+    /// svfloat32_t svminnm[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svminnm[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svminnm[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// </summary>
+    public static unsafe Vector<float> MinNumber(Vector<float> left, Vector<float> right) => MinNumber(left, right);
+
+    /// <summary>
+    /// svfloat64_t svminnm[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svminnm[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svminnm[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// </summary>
+    public static unsafe Vector<double> MinNumber(Vector<double> left, Vector<double> right) => MinNumber(left, right);
+
+
+    /// MinNumberAcross : Minimum number reduction to scalar
+
+    /// <summary>
+    /// float32_t svminnmv[_f32](svbool_t pg, svfloat32_t op)
+    /// </summary>
+    public static unsafe Vector<float> MinNumberAcross(Vector<float> value) => MinNumberAcross(value);
+
+    /// <summary>
+    /// float64_t svminnmv[_f64](svbool_t pg, svfloat64_t op)
+    /// </summary>
+    public static unsafe Vector<double> MinNumberAcross(Vector<double> value) => MinNumberAcross(value);
+
+
+    /// Multiply : Multiply
+
+    /// <summary>
+    /// svint8_t svmul[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// svint8_t svmul[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// svint8_t svmul[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+    /// </summary>
+    public static unsafe Vector<sbyte> Multiply(Vector<sbyte> left, Vector<sbyte> right) => Multiply(left, right);
+
+    /// <summary>
+    /// svint16_t svmul[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// svint16_t svmul[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// svint16_t svmul[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+    /// </summary>
+    public static unsafe Vector<short> Multiply(Vector<short> left, Vector<short> right) => Multiply(left, right);
+
+    /// <summary>
+    /// svint32_t svmul[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// svint32_t svmul[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// svint32_t svmul[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+    /// </summary>
+    public static unsafe Vector<int> Multiply(Vector<int> left, Vector<int> right) => Multiply(left, right);
+
+    /// <summary>
+    /// svint64_t svmul[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// svint64_t svmul[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// svint64_t svmul[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+    /// </summary>
+    public static unsafe Vector<long> Multiply(Vector<long> left, Vector<long> right) => Multiply(left, right);
+
+    /// <summary>
+    /// svuint8_t svmul[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// svuint8_t svmul[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// svuint8_t svmul[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+    /// </summary>
+    public static unsafe Vector<byte> Multiply(Vector<byte> left, Vector<byte> right) => Multiply(left, right);
+
+    /// <summary>
+    /// svuint16_t svmul[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// svuint16_t svmul[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// svuint16_t svmul[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+    /// </summary>
+    public static unsafe Vector<ushort> Multiply(Vector<ushort> left, Vector<ushort> right) => Multiply(left, right);
+
+    /// <summary>
+    /// svuint32_t svmul[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// svuint32_t svmul[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// svuint32_t svmul[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    /// </summary>
+    public static unsafe Vector<uint> Multiply(Vector<uint> left, Vector<uint> right) => Multiply(left, right);
+
+    /// <summary>
+    /// svuint64_t svmul[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// svuint64_t svmul[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// svuint64_t svmul[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    /// </summary>
+    public static unsafe Vector<ulong> Multiply(Vector<ulong> left, Vector<ulong> right) => Multiply(left, right);
+
+    /// <summary>
+    /// svfloat32_t svmul[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svmul[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// svfloat32_t svmul[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+    /// </summary>
+    public static unsafe Vector<float> Multiply(Vector<float> left, Vector<float> right) => Multiply(left, right);
+
+    /// <summary>
+    /// svfloat64_t svmul[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svmul[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// svfloat64_t svmul[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+    /// </summary>
+    public static unsafe Vector<double> Multiply(Vector<double> left, Vector<double> right) => Multiply(left, right);
svuint64_t op1, svuint64_t op2) + /// svuint64_t svmul[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector Multiply(Vector left, Vector right) => Multiply(left, right); + + /// + /// svfloat32_t svmul[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svmul[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svmul[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector Multiply(Vector left, Vector right) => Multiply(left, right); + + /// + /// svfloat64_t svmul[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svmul[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svmul[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector Multiply(Vector left, Vector right) => Multiply(left, right); + + + /// MultiplyAdd : Multiply-add, addend first + + /// + /// svint8_t svmla[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3) + /// svint8_t svmla[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3) + /// svint8_t svmla[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right) => MultiplyAdd(addend, left, right); + + /// + /// svint16_t svmla[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3) + /// svint16_t svmla[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3) + /// svint16_t svmla[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right) => MultiplyAdd(addend, left, right); + + /// + /// svint32_t svmla[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) + /// svint32_t svmla[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) + /// svint32_t svmla[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right) => MultiplyAdd(addend, left, right); + + /// + /// svint64_t svmla[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) + /// svint64_t svmla[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) + /// svint64_t svmla[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) + /// + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right) => MultiplyAdd(addend, left, right); + + /// + /// svuint8_t svmla[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// svuint8_t svmla[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// svuint8_t svmla[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right) => MultiplyAdd(addend, left, right); + + /// + /// svuint16_t svmla[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// svuint16_t svmla[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// svuint16_t svmla[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right) => MultiplyAdd(addend, left, right); + + /// + /// svuint32_t svmla[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// svuint32_t svmla[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// svuint32_t 
svmla[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right) => MultiplyAdd(addend, left, right); + + /// + /// svuint64_t svmla[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// svuint64_t svmla[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// svuint64_t svmla[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right) => MultiplyAdd(addend, left, right); + + + + + /// MultiplyAddRotateComplex : Complex multiply-add with rotate + + /// + /// svfloat32_t svcmla[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation) + /// svfloat32_t svcmla[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation) + /// svfloat32_t svcmla[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svfloat64_t svcmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation) + /// svfloat64_t svcmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation) + /// svfloat64_t svcmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + + /// MultiplyAddRotateComplexBySelectedScalar : Complex multiply-add with rotate + + /// + /// svfloat32_t svcmla_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) => MultiplyAddRotateComplexBySelectedScalar(addend, left, right, rightIndex, rotation); + + + /// MultiplyBySelectedScalar : Multiply + + /// + /// svfloat32_t svmul_lane[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex); + + /// + /// svfloat64_t svmul_lane[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex); + + + /// MultiplyExtended : Multiply extended (∞×0=2) + + /// + /// svfloat32_t svmulx[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svmulx[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svmulx[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector MultiplyExtended(Vector left, Vector right) => MultiplyExtended(left, right); + + /// + /// svfloat64_t svmulx[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svmulx[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svmulx[_f64]_z(svbool_t pg, 
svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector MultiplyExtended(Vector left, Vector right) => MultiplyExtended(left, right); + + + + /// MultiplySubtract : Multiply-subtract, minuend first + + /// + /// svint8_t svmls[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3) + /// svint8_t svmls[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3) + /// svint8_t svmls[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + /// + /// svint16_t svmls[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3) + /// svint16_t svmls[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3) + /// svint16_t svmls[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + /// + /// svint32_t svmls[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) + /// svint32_t svmls[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) + /// svint32_t svmls[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + /// + /// svint64_t svmls[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) + /// svint64_t svmls[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) + /// svint64_t svmls[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + /// + /// svuint8_t svmls[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// svuint8_t svmls[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// svuint8_t svmls[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + /// + /// svuint16_t svmls[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// svuint16_t svmls[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// svuint16_t svmls[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + /// + /// svuint32_t svmls[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// svuint32_t svmls[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// svuint32_t svmls[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + /// + /// svuint64_t svmls[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// svuint64_t svmls[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// svuint64_t svmls[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + + + + /// Negate : Negate + + 
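
(Editorial usage sketch, same assumptions as above: it ties together the multiply-accumulate groups just listed and the Negate overloads that follow below. Note the argument order: the addend/minuend comes first, matching the svmla/svmls op1 position.)

    var acc = new Vector<int>(10);
    var x   = new Vector<int>(3);
    var y   = new Vector<int>(4);
    Vector<int> ma = Sve.MultiplyAdd(acc, x, y);      // per lane: 10 + 3*4 = 22
    Vector<int> ms = Sve.MultiplySubtract(acc, x, y); // per lane: 10 - 3*4 = -2
    Vector<int> ng = Sve.Negate(ms);                  // per lane: 2
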
/// + /// svint8_t svneg[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) + /// svint8_t svneg[_s8]_x(svbool_t pg, svint8_t op) + /// svint8_t svneg[_s8]_z(svbool_t pg, svint8_t op) + /// + public static unsafe Vector Negate(Vector value) => Negate(value); + + /// + /// svint16_t svneg[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// svint16_t svneg[_s16]_x(svbool_t pg, svint16_t op) + /// svint16_t svneg[_s16]_z(svbool_t pg, svint16_t op) + /// + public static unsafe Vector Negate(Vector value) => Negate(value); + + /// + /// svint32_t svneg[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// svint32_t svneg[_s32]_x(svbool_t pg, svint32_t op) + /// svint32_t svneg[_s32]_z(svbool_t pg, svint32_t op) + /// + public static unsafe Vector Negate(Vector value) => Negate(value); + + /// + /// svint64_t svneg[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// svint64_t svneg[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svneg[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector Negate(Vector value) => Negate(value); + + /// + /// svfloat32_t svneg[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// svfloat32_t svneg[_f32]_x(svbool_t pg, svfloat32_t op) + /// svfloat32_t svneg[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector Negate(Vector value) => Negate(value); + + /// + /// svfloat64_t svneg[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// svfloat64_t svneg[_f64]_x(svbool_t pg, svfloat64_t op) + /// svfloat64_t svneg[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector Negate(Vector value) => Negate(value); + + + + + /// Not : Bitwise invert + + /// + /// svint8_t svnot[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) + /// svint8_t svnot[_s8]_x(svbool_t pg, svint8_t op) + /// svint8_t svnot[_s8]_z(svbool_t pg, svint8_t op) + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// + public static unsafe Vector Not(Vector value) => Not(value); + + /// + /// svint16_t svnot[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// svint16_t svnot[_s16]_x(svbool_t pg, svint16_t op) + /// svint16_t svnot[_s16]_z(svbool_t pg, svint16_t op) + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// + public static unsafe Vector Not(Vector value) => Not(value); + + /// + /// svint32_t svnot[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// svint32_t svnot[_s32]_x(svbool_t pg, svint32_t op) + /// svint32_t svnot[_s32]_z(svbool_t pg, svint32_t op) + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// + public static unsafe Vector Not(Vector value) => Not(value); + + /// + /// svint64_t svnot[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// svint64_t svnot[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svnot[_s64]_z(svbool_t pg, svint64_t op) + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// + public static unsafe Vector Not(Vector value) => Not(value); + + /// + /// svuint8_t svnot[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op) + /// svuint8_t svnot[_u8]_x(svbool_t pg, svuint8_t op) + /// svuint8_t svnot[_u8]_z(svbool_t pg, svuint8_t op) + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// + public static unsafe Vector Not(Vector value) => Not(value); + + /// + /// svuint16_t svnot[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) + /// svuint16_t svnot[_u16]_x(svbool_t pg, svuint16_t op) + /// svuint16_t svnot[_u16]_z(svbool_t pg, svuint16_t op) + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// + 
public static unsafe Vector Not(Vector value) => Not(value); + + /// + /// svuint32_t svnot[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// svuint32_t svnot[_u32]_x(svbool_t pg, svuint32_t op) + /// svuint32_t svnot[_u32]_z(svbool_t pg, svuint32_t op) + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// + public static unsafe Vector Not(Vector value) => Not(value); + + /// + /// svuint64_t svnot[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// svuint64_t svnot[_u64]_x(svbool_t pg, svuint64_t op) + /// svuint64_t svnot[_u64]_z(svbool_t pg, svuint64_t op) + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// + public static unsafe Vector Not(Vector value) => Not(value); + + + /// Or : Bitwise inclusive OR + + /// + /// svint8_t svorr[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svorr[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svorr[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + /// + /// svint16_t svorr[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svorr[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svorr[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + /// + /// svint32_t svorr[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svorr[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svorr[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + /// + /// svint64_t svorr[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svorr[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svorr[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + /// + /// svuint8_t svorr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svorr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svorr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + /// + /// svuint16_t svorr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svorr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svorr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + /// + /// svuint32_t svorr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svorr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svorr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + /// + /// svuint64_t svorr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svorr[_u64]_x(svbool_t pg, 
svuint64_t op1, svuint64_t op2) + /// svuint64_t svorr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + + /// OrAcross : Bitwise inclusive OR reduction to scalar + + /// + /// int8_t svorv[_s8](svbool_t pg, svint8_t op) + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + /// + /// int16_t svorv[_s16](svbool_t pg, svint16_t op) + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + /// + /// int32_t svorv[_s32](svbool_t pg, svint32_t op) + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + /// + /// int64_t svorv[_s64](svbool_t pg, svint64_t op) + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + /// + /// uint8_t svorv[_u8](svbool_t pg, svuint8_t op) + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + /// + /// uint16_t svorv[_u16](svbool_t pg, svuint16_t op) + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + /// + /// uint32_t svorv[_u32](svbool_t pg, svuint32_t op) + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + /// + /// uint64_t svorv[_u64](svbool_t pg, svuint64_t op) + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + + /// OrNot : Bitwise OR NOT + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + +
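
(Editorial usage sketch; the operand order is an assumption taken from the svorn form, i.e. OrNot(left, right) is read as computing left | ~right per lane.)

    var a = new Vector<uint>(0b_1100);
    var b = new Vector<uint>(0b_1010);
    Vector<uint> or  = Sve.Or(a, b);     // 0b_1110 in every lane
    Vector<uint> orn = Sve.OrNot(a, b);  // 0b_1100 | ~0b_1010 = 0xFFFF_FFFD
    uint bits = Sve.OrAcross(or)[0];     // OR of all lanes, in element 0

+ /// PopCount : Count nonzero bits + + /// + /// svuint8_t svcnt[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op) + /// svuint8_t svcnt[_s8]_x(svbool_t pg, svint8_t op) + /// 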
svuint8_t svcnt[_s8]_z(svbool_t pg, svint8_t op) + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint8_t svcnt[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op) + /// svuint8_t svcnt[_u8]_x(svbool_t pg, svuint8_t op) + /// svuint8_t svcnt[_u8]_z(svbool_t pg, svuint8_t op) + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint16_t svcnt[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op) + /// svuint16_t svcnt[_s16]_x(svbool_t pg, svint16_t op) + /// svuint16_t svcnt[_s16]_z(svbool_t pg, svint16_t op) + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint16_t svcnt[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) + /// svuint16_t svcnt[_u16]_x(svbool_t pg, svuint16_t op) + /// svuint16_t svcnt[_u16]_z(svbool_t pg, svuint16_t op) + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint32_t svcnt[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op) + /// svuint32_t svcnt[_s32]_x(svbool_t pg, svint32_t op) + /// svuint32_t svcnt[_s32]_z(svbool_t pg, svint32_t op) + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint32_t svcnt[_f32]_m(svuint32_t inactive, svbool_t pg, svfloat32_t op) + /// svuint32_t svcnt[_f32]_x(svbool_t pg, svfloat32_t op) + /// svuint32_t svcnt[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint32_t svcnt[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// svuint32_t svcnt[_u32]_x(svbool_t pg, svuint32_t op) + /// svuint32_t svcnt[_u32]_z(svbool_t pg, svuint32_t op) + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint64_t svcnt[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op) + /// svuint64_t svcnt[_s64]_x(svbool_t pg, svint64_t op) + /// svuint64_t svcnt[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint64_t svcnt[_f64]_m(svuint64_t inactive, svbool_t pg, svfloat64_t op) + /// svuint64_t svcnt[_f64]_x(svbool_t pg, svfloat64_t op) + /// svuint64_t svcnt[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint64_t svcnt[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// svuint64_t svcnt[_u64]_x(svbool_t pg, svuint64_t op) + /// svuint64_t svcnt[_u64]_z(svbool_t pg, svuint64_t op) + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + + /// PrefetchBytes : Prefetch bytes + + /// + /// void svprfb(svbool_t pg, const void *base, enum svprfop op) + /// + public static unsafe void PrefetchBytes(Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType) => PrefetchBytes(mask, address, prefetchType); + + + /// PrefetchInt16 : Prefetch halfwords + + /// + /// void svprfh(svbool_t pg, const void *base, enum svprfop op) + /// + public static unsafe void PrefetchInt16(Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType) => PrefetchInt16(mask, address, prefetchType); + + + /// PrefetchInt32 : Prefetch words + + /// + /// void svprfw(svbool_t pg, const void *base, enum svprfop op) + /// + public static unsafe void PrefetchInt32(Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType) => PrefetchInt32(mask, address, prefetchType); + + + /// 
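
(Editorial usage sketch for the prefetch group; PrefetchInt64 continues below. Both CreateTrueMaskByte and the SvePrefetchType.LoadL1Temporal member name are assumptions based on the ACLE svptrue/svprfop mapping; a prefetch hint is harmless if the data is never touched. Requires an unsafe context.)

    fixed (byte* p = buffer)  // buffer: a hypothetical byte[]
    {
        // Hint the upcoming block into L1 before a streaming load over it.
        Sve.PrefetchBytes(Sve.CreateTrueMaskByte(), p, SvePrefetchType.LoadL1Temporal);
    }
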
PrefetchInt64 : Prefetch doublewords + + /// + /// void svprfd(svbool_t pg, const void *base, enum svprfop op) + /// + public static unsafe void PrefetchInt64(Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType) => PrefetchInt64(mask, address, prefetchType); + + + /// ReciprocalEstimate : Reciprocal estimate + + /// + /// svfloat32_t svrecpe[_f32](svfloat32_t op) + /// + public static unsafe Vector ReciprocalEstimate(Vector value) => ReciprocalEstimate(value); + + /// + /// svfloat64_t svrecpe[_f64](svfloat64_t op) + /// + public static unsafe Vector ReciprocalEstimate(Vector value) => ReciprocalEstimate(value); + + + /// ReciprocalExponent : Reciprocal exponent + + /// + /// svfloat32_t svrecpx[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// svfloat32_t svrecpx[_f32]_x(svbool_t pg, svfloat32_t op) + /// svfloat32_t svrecpx[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector ReciprocalExponent(Vector value) => ReciprocalExponent(value); + + /// + /// svfloat64_t svrecpx[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// svfloat64_t svrecpx[_f64]_x(svbool_t pg, svfloat64_t op) + /// svfloat64_t svrecpx[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector ReciprocalExponent(Vector value) => ReciprocalExponent(value); + + + /// ReciprocalSqrtEstimate : Reciprocal square root estimate + + /// + /// svfloat32_t svrsqrte[_f32](svfloat32_t op) + /// + public static unsafe Vector ReciprocalSqrtEstimate(Vector value) => ReciprocalSqrtEstimate(value); + + /// + /// svfloat64_t svrsqrte[_f64](svfloat64_t op) + /// + public static unsafe Vector ReciprocalSqrtEstimate(Vector value) => ReciprocalSqrtEstimate(value); + + + /// ReciprocalSqrtStep : Reciprocal square root step + + /// + /// svfloat32_t svrsqrts[_f32](svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector ReciprocalSqrtStep(Vector left, Vector right) => ReciprocalSqrtStep(left, right); + + /// + /// svfloat64_t svrsqrts[_f64](svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector ReciprocalSqrtStep(Vector left, Vector right) => ReciprocalSqrtStep(left, right); + + + /// ReciprocalStep : Reciprocal step + + /// + /// svfloat32_t svrecps[_f32](svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector ReciprocalStep(Vector left, Vector right) => ReciprocalStep(left, right); + + /// + /// svfloat64_t svrecps[_f64](svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector ReciprocalStep(Vector left, Vector right) => ReciprocalStep(left, right); + +
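
(Editorial usage sketch: the standard FRECPE/FRECPS Newton-Raphson refinement. Per the ACLE, ReciprocalStep(x, est) returns 2 - x*est, so each multiply roughly doubles the precision of the 1/x estimate.)

    Vector<float> x   = new Vector<float>(3.0f);
    Vector<float> inv = Sve.ReciprocalEstimate(x);        // low-precision seed for 1/3
    inv = Sve.Multiply(inv, Sve.ReciprocalStep(x, inv));  // first refinement step
    inv = Sve.Multiply(inv, Sve.ReciprocalStep(x, inv));  // ~single-precision accurate

+ /// ReverseBits : Reverse bits + + /// + /// svint8_t svrbit[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) + /// svint8_t svrbit[_s8]_x(svbool_t pg, svint8_t op) + /// svint8_t svrbit[_s8]_z(svbool_t pg, svint8_t op) + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + /// + /// svint16_t svrbit[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// svint16_t svrbit[_s16]_x(svbool_t pg, svint16_t op) + /// svint16_t svrbit[_s16]_z(svbool_t pg, svint16_t op) + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + /// + /// svint32_t svrbit[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// svint32_t svrbit[_s32]_x(svbool_t pg, svint32_t op) + /// svint32_t svrbit[_s32]_z(svbool_t pg, svint32_t op) + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + /// + /// svint64_t svrbit[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t 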
op) + /// svint64_t svrbit[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svrbit[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + /// + /// svuint8_t svrbit[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op) + /// svuint8_t svrbit[_u8]_x(svbool_t pg, svuint8_t op) + /// svuint8_t svrbit[_u8]_z(svbool_t pg, svuint8_t op) + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + /// + /// svuint16_t svrbit[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) + /// svuint16_t svrbit[_u16]_x(svbool_t pg, svuint16_t op) + /// svuint16_t svrbit[_u16]_z(svbool_t pg, svuint16_t op) + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + /// + /// svuint32_t svrbit[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// svuint32_t svrbit[_u32]_x(svbool_t pg, svuint32_t op) + /// svuint32_t svrbit[_u32]_z(svbool_t pg, svuint32_t op) + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + /// + /// svuint64_t svrbit[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// svuint64_t svrbit[_u64]_x(svbool_t pg, svuint64_t op) + /// svuint64_t svrbit[_u64]_z(svbool_t pg, svuint64_t op) + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + + /// ReverseElement : Reverse all elements + + /// + /// svint8_t svrev[_s8](svint8_t op) + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svint16_t svrev[_s16](svint16_t op) + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svint32_t svrev[_s32](svint32_t op) + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svint64_t svrev[_s64](svint64_t op) + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svuint8_t svrev[_u8](svuint8_t op) + /// svbool_t svrev_b8(svbool_t op) + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svuint16_t svrev[_u16](svuint16_t op) + /// svbool_t svrev_b16(svbool_t op) + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svuint32_t svrev[_u32](svuint32_t op) + /// svbool_t svrev_b32(svbool_t op) + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svuint64_t svrev[_u64](svuint64_t op) + /// svbool_t svrev_b64(svbool_t op) + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svfloat32_t svrev[_f32](svfloat32_t op) + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svfloat64_t svrev[_f64](svfloat64_t op) + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + + /// ReverseElement16 : Reverse halfwords within elements + + /// + /// svint32_t svrevh[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// svint32_t svrevh[_s32]_x(svbool_t pg, svint32_t op) + /// svint32_t svrevh[_s32]_z(svbool_t pg, svint32_t op) + /// + public static unsafe Vector ReverseElement16(Vector value) => ReverseElement16(value); + + /// + /// svint64_t svrevh[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// svint64_t svrevh[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svrevh[_s64]_z(svbool_t pg, svint64_t op) + 
/// + public static unsafe Vector ReverseElement16(Vector value) => ReverseElement16(value); + + /// + /// svuint32_t svrevh[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// svuint32_t svrevh[_u32]_x(svbool_t pg, svuint32_t op) + /// svuint32_t svrevh[_u32]_z(svbool_t pg, svuint32_t op) + /// + public static unsafe Vector ReverseElement16(Vector value) => ReverseElement16(value); + + /// + /// svuint64_t svrevh[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// svuint64_t svrevh[_u64]_x(svbool_t pg, svuint64_t op) + /// svuint64_t svrevh[_u64]_z(svbool_t pg, svuint64_t op) + /// + public static unsafe Vector ReverseElement16(Vector value) => ReverseElement16(value); + + + /// ReverseElement32 : Reverse words within elements + + /// + /// svint64_t svrevw[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// svint64_t svrevw[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svrevw[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector ReverseElement32(Vector value) => ReverseElement32(value); + + /// + /// svuint64_t svrevw[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// svuint64_t svrevw[_u64]_x(svbool_t pg, svuint64_t op) + /// svuint64_t svrevw[_u64]_z(svbool_t pg, svuint64_t op) + /// + public static unsafe Vector ReverseElement32(Vector value) => ReverseElement32(value); + + + /// ReverseElement8 : Reverse bytes within elements + + /// + /// svint16_t svrevb[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// svint16_t svrevb[_s16]_x(svbool_t pg, svint16_t op) + /// svint16_t svrevb[_s16]_z(svbool_t pg, svint16_t op) + /// + public static unsafe Vector ReverseElement8(Vector value) => ReverseElement8(value); + + /// + /// svint32_t svrevb[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// svint32_t svrevb[_s32]_x(svbool_t pg, svint32_t op) + /// svint32_t svrevb[_s32]_z(svbool_t pg, svint32_t op) + /// + public static unsafe Vector ReverseElement8(Vector value) => ReverseElement8(value); + + /// + /// svint64_t svrevb[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// svint64_t svrevb[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svrevb[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector ReverseElement8(Vector value) => ReverseElement8(value); + + /// + /// svuint16_t svrevb[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) + /// svuint16_t svrevb[_u16]_x(svbool_t pg, svuint16_t op) + /// svuint16_t svrevb[_u16]_z(svbool_t pg, svuint16_t op) + /// + public static unsafe Vector ReverseElement8(Vector value) => ReverseElement8(value); + + /// + /// svuint32_t svrevb[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// svuint32_t svrevb[_u32]_x(svbool_t pg, svuint32_t op) + /// svuint32_t svrevb[_u32]_z(svbool_t pg, svuint32_t op) + /// + public static unsafe Vector ReverseElement8(Vector value) => ReverseElement8(value); + + /// + /// svuint64_t svrevb[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// svuint64_t svrevb[_u64]_x(svbool_t pg, svuint64_t op) + /// svuint64_t svrevb[_u64]_z(svbool_t pg, svuint64_t op) + /// + public static unsafe Vector ReverseElement8(Vector value) => ReverseElement8(value); + + + /// RoundAwayFromZero : Round to nearest, ties away from zero + + /// + /// svfloat32_t svrinta[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// svfloat32_t svrinta[_f32]_x(svbool_t pg, svfloat32_t op) + /// svfloat32_t svrinta[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector 
RoundAwayFromZero(Vector value) => RoundAwayFromZero(value); + + /// + /// svfloat64_t svrinta[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// svfloat64_t svrinta[_f64]_x(svbool_t pg, svfloat64_t op) + /// svfloat64_t svrinta[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector RoundAwayFromZero(Vector value) => RoundAwayFromZero(value); + + + /// RoundToNearest : Round to nearest, ties to even + + /// + /// svfloat32_t svrintn[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// svfloat32_t svrintn[_f32]_x(svbool_t pg, svfloat32_t op) + /// svfloat32_t svrintn[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector RoundToNearest(Vector value) => RoundToNearest(value); + + /// + /// svfloat64_t svrintn[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// svfloat64_t svrintn[_f64]_x(svbool_t pg, svfloat64_t op) + /// svfloat64_t svrintn[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector RoundToNearest(Vector value) => RoundToNearest(value); + + + /// RoundToNegativeInfinity : Round towards -∞ + + /// + /// svfloat32_t svrintm[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// svfloat32_t svrintm[_f32]_x(svbool_t pg, svfloat32_t op) + /// svfloat32_t svrintm[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector RoundToNegativeInfinity(Vector value) => RoundToNegativeInfinity(value); + + /// + /// svfloat64_t svrintm[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// svfloat64_t svrintm[_f64]_x(svbool_t pg, svfloat64_t op) + /// svfloat64_t svrintm[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector RoundToNegativeInfinity(Vector value) => RoundToNegativeInfinity(value); + + + /// RoundToPositiveInfinity : Round towards +∞ + + /// + /// svfloat32_t svrintp[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// svfloat32_t svrintp[_f32]_x(svbool_t pg, svfloat32_t op) + /// svfloat32_t svrintp[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector RoundToPositiveInfinity(Vector value) => RoundToPositiveInfinity(value); + + /// + /// svfloat64_t svrintp[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// svfloat64_t svrintp[_f64]_x(svbool_t pg, svfloat64_t op) + /// svfloat64_t svrintp[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector RoundToPositiveInfinity(Vector value) => RoundToPositiveInfinity(value); + + + /// RoundToZero : Round towards zero + + /// + /// svfloat32_t svrintz[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// svfloat32_t svrintz[_f32]_x(svbool_t pg, svfloat32_t op) + /// svfloat32_t svrintz[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector RoundToZero(Vector value) => RoundToZero(value); + + /// + /// svfloat64_t svrintz[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// svfloat64_t svrintz[_f64]_x(svbool_t pg, svfloat64_t op) + /// svfloat64_t svrintz[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector RoundToZero(Vector value) => RoundToZero(value); + +
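
(Editorial sketch: how the five rounding groups above treat a tie such as 2.5; the values follow the IEEE 754 modes the headings name.)

    var v = new Vector<double>(2.5);
    Sve.RoundAwayFromZero(v);        // 3.0 (ties away from zero)
    Sve.RoundToNearest(v);           // 2.0 (ties to even)
    Sve.RoundToNegativeInfinity(v);  // 2.0 (floor)
    Sve.RoundToPositiveInfinity(v);  // 3.0 (ceiling)
    Sve.RoundToZero(v);              // 2.0 (truncate)

+ + + /// SaturatingDecrementBy16BitElementCount : Saturating decrement by number of halfword elements + + /// + /// int32_t svqdech_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe int SaturatingDecrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => 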
SaturatingDecrementBy16BitElementCount(value, scale, pattern); + + /// + /// int64_t svqdech_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe long SaturatingDecrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy16BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqdech_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe uint SaturatingDecrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy16BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqdech_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe ulong SaturatingDecrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy16BitElementCount(value, scale, pattern); + + /// + /// svint16_t svqdech_pat[_s16](svint16_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe Vector SaturatingDecrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy16BitElementCount(value, scale, pattern); + + /// + /// svuint16_t svqdech_pat[_u16](svuint16_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe Vector SaturatingDecrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy16BitElementCount(value, scale, pattern); + + + /// SaturatingDecrementBy32BitElementCount : Saturating decrement by number of word elements + + /// + /// int32_t svqdecw_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe int SaturatingDecrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy32BitElementCount(value, scale, pattern); + + /// + /// int64_t svqdecw_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe long SaturatingDecrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy32BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqdecw_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe uint SaturatingDecrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy32BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqdecw_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe ulong SaturatingDecrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy32BitElementCount(value, scale, pattern); + + /// + /// svint32_t svqdecw_pat[_s32](svint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe Vector SaturatingDecrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern 
= SveMaskPattern.All) => SaturatingDecrementBy32BitElementCount(value, scale, pattern); + + /// + /// svuint32_t svqdecw_pat[_u32](svuint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe Vector SaturatingDecrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy32BitElementCount(value, scale, pattern); + + + /// SaturatingDecrementBy64BitElementCount : Saturating decrement by number of doubleword elements + + /// + /// int32_t svqdecd_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe int SaturatingDecrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy64BitElementCount(value, scale, pattern); + + /// + /// int64_t svqdecd_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe long SaturatingDecrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy64BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqdecd_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe uint SaturatingDecrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy64BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqdecd_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe ulong SaturatingDecrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy64BitElementCount(value, scale, pattern); + + /// + /// svint64_t svqdecd_pat[_s64](svint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe Vector SaturatingDecrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy64BitElementCount(value, scale, pattern); + + /// + /// svuint64_t svqdecd_pat[_u64](svuint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe Vector SaturatingDecrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy64BitElementCount(value, scale, pattern); + + + /// SaturatingDecrementBy8BitElementCount : Saturating decrement by number of byte elements + + /// + /// int32_t svqdecb_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe int SaturatingDecrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy8BitElementCount(value, scale, pattern); + + /// + /// int64_t svqdecb_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe long SaturatingDecrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy8BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqdecb_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe uint 
SaturatingDecrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy8BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqdecb_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe ulong SaturatingDecrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy8BitElementCount(value, scale, pattern); + + + /// SaturatingDecrementByActiveElementCount : Saturating decrement by active element count + + /// + /// svint16_t svqdecp[_s16](svint16_t op, svbool_t pg) + /// + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// svint32_t svqdecp[_s32](svint32_t op, svbool_t pg) + /// + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// svint64_t svqdecp[_s64](svint64_t op, svbool_t pg) + /// + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int32_t svqdecp[_n_s32]_b8(int32_t op, svbool_t pg) + /// + public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int64_t svqdecp[_n_s64]_b8(int64_t op, svbool_t pg) + /// + public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint32_t svqdecp[_n_u32]_b8(uint32_t op, svbool_t pg) + /// + public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint64_t svqdecp[_n_u64]_b8(uint64_t op, svbool_t pg) + /// + public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int32_t svqdecp[_n_s32]_b16(int32_t op, svbool_t pg) + /// + public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int64_t svqdecp[_n_s64]_b16(int64_t op, svbool_t pg) + /// + public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint32_t svqdecp[_n_u32]_b16(uint32_t op, svbool_t pg) + /// + public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint64_t svqdecp[_n_u64]_b16(uint64_t op, svbool_t pg) + /// + public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// svuint16_t svqdecp[_u16](svuint16_t op, svbool_t pg) + /// + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int32_t svqdecp[_n_s32]_b32(int32_t op, svbool_t pg) + /// + public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector from) => 
SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int64_t svqdecp[_n_s64]_b32(int64_t op, svbool_t pg) + /// + public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint32_t svqdecp[_n_u32]_b32(uint32_t op, svbool_t pg) + /// + public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint64_t svqdecp[_n_u64]_b32(uint64_t op, svbool_t pg) + /// + public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// svuint32_t svqdecp[_u32](svuint32_t op, svbool_t pg) + /// + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int32_t svqdecp[_n_s32]_b64(int32_t op, svbool_t pg) + /// + public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int64_t svqdecp[_n_s64]_b64(int64_t op, svbool_t pg) + /// + public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint32_t svqdecp[_n_u32]_b64(uint32_t op, svbool_t pg) + /// + public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint64_t svqdecp[_n_u64]_b64(uint64_t op, svbool_t pg) + /// + public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// svuint64_t svqdecp[_u64](svuint64_t op, svbool_t pg) + /// + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + + /// SaturatingIncrementBy16BitElementCount : Saturating increment by number of halfword elements + + /// + /// int32_t svqinch_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe int SaturatingIncrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy16BitElementCount(value, scale, pattern); + + /// + /// int64_t svqinch_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe long SaturatingIncrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy16BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqinch_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe uint SaturatingIncrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy16BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqinch_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe ulong SaturatingIncrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy16BitElementCount(value, 
scale, pattern); + + /// + /// svint16_t svqinch_pat[_s16](svint16_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe Vector SaturatingIncrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy16BitElementCount(value, scale, pattern); + + /// + /// svuint16_t svqinch_pat[_u16](svuint16_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe Vector SaturatingIncrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy16BitElementCount(value, scale, pattern); + + + /// SaturatingIncrementBy32BitElementCount : Saturating increment by number of word elements + + /// + /// int32_t svqincw_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe int SaturatingIncrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy32BitElementCount(value, scale, pattern); + + /// + /// int64_t svqincw_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe long SaturatingIncrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy32BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqincw_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe uint SaturatingIncrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy32BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqincw_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe ulong SaturatingIncrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy32BitElementCount(value, scale, pattern); + + /// + /// svint32_t svqincw_pat[_s32](svint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe Vector SaturatingIncrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy32BitElementCount(value, scale, pattern); + + /// + /// svuint32_t svqincw_pat[_u32](svuint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe Vector SaturatingIncrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy32BitElementCount(value, scale, pattern); + + + /// SaturatingIncrementBy64BitElementCount : Saturating increment by number of doubleword elements + + /// + /// int32_t svqincd_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe int SaturatingIncrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy64BitElementCount(value, scale, pattern); + + /// + /// int64_t svqincd_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe long SaturatingIncrementBy64BitElementCount(long value, 
[ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy64BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqincd_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe uint SaturatingIncrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy64BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqincd_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe ulong SaturatingIncrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy64BitElementCount(value, scale, pattern); + + /// + /// svint64_t svqincd_pat[_s64](svint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe Vector SaturatingIncrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy64BitElementCount(value, scale, pattern); + + /// + /// svuint64_t svqincd_pat[_u64](svuint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe Vector SaturatingIncrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy64BitElementCount(value, scale, pattern); + + + /// SaturatingIncrementBy8BitElementCount : Saturating increment by number of byte elements + + /// + /// int32_t svqincb_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe int SaturatingIncrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy8BitElementCount(value, scale, pattern); + + /// + /// int64_t svqincb_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe long SaturatingIncrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy8BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqincb_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe uint SaturatingIncrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy8BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqincb_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// + public static unsafe ulong SaturatingIncrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy8BitElementCount(value, scale, pattern); + +
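
(Editorial sketch: the scalar overloads above exist for strip-mined loop counters, the INCB/INCH/INCW/INCD idiom, saturating instead of wrapping at the numeric limits. Here `data` is a hypothetical int[] and the scale factor is 1.)

    int i = 0;
    while (i < data.Length)
    {
        // ... process one vector's worth of 32-bit elements starting at index i ...
        i = Sve.SaturatingIncrementBy32BitElementCount(i, 1);  // i += lane count, saturating
    }

+ /// SaturatingIncrementByActiveElementCount : Saturating increment by active element count + + /// + /// svint16_t svqincp[_s16](svint16_t op, svbool_t pg) + /// + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// svint32_t svqincp[_s32](svint32_t op, svbool_t pg) + /// + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from) => 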
SaturatingIncrementByActiveElementCount(value, from); + + /// + /// svint64_t svqincp[_s64](svint64_t op, svbool_t pg) + /// + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// int32_t svqincp[_n_s32]_b8(int32_t op, svbool_t pg) + /// + public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// int64_t svqincp[_n_s64]_b8(int64_t op, svbool_t pg) + /// + public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// uint32_t svqincp[_n_u32]_b8(uint32_t op, svbool_t pg) + /// + public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// uint64_t svqincp[_n_u64]_b8(uint64_t op, svbool_t pg) + /// + public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// int32_t svqincp[_n_s32]_b16(int32_t op, svbool_t pg) + /// + public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// int64_t svqincp[_n_s64]_b16(int64_t op, svbool_t pg) + /// + public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// uint32_t svqincp[_n_u32]_b16(uint32_t op, svbool_t pg) + /// + public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// uint64_t svqincp[_n_u64]_b16(uint64_t op, svbool_t pg) + /// + public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// svuint16_t svqincp[_u16](svuint16_t op, svbool_t pg) + /// + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// int32_t svqincp[_n_s32]_b32(int32_t op, svbool_t pg) + /// + public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// int64_t svqincp[_n_s64]_b32(int64_t op, svbool_t pg) + /// + public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// uint32_t svqincp[_n_u32]_b32(uint32_t op, svbool_t pg) + /// + public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// uint64_t svqincp[_n_u64]_b32(uint64_t op, svbool_t pg) + /// + public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// svuint32_t svqincp[_u32](svuint32_t op, svbool_t pg) + /// + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// int32_t svqincp[_n_s32]_b64(int32_t op, svbool_t pg) + /// + public static unsafe int 
SaturatingIncrementByActiveElementCount(int value, Vector<ulong> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+        /// <summary>
+        /// int64_t svqincp[_n_s64]_b64(int64_t op, svbool_t pg)
+        /// </summary>
+        public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector<ulong> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+        /// <summary>
+        /// uint32_t svqincp[_n_u32]_b64(uint32_t op, svbool_t pg)
+        /// </summary>
+        public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector<ulong> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+        /// <summary>
+        /// uint64_t svqincp[_n_u64]_b64(uint64_t op, svbool_t pg)
+        /// </summary>
+        public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector<ulong> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+        /// <summary>
+        /// svuint64_t svqincp[_u64](svuint64_t op, svbool_t pg)
+        /// </summary>
+        public static unsafe Vector<ulong> SaturatingIncrementByActiveElementCount(Vector<ulong> value, Vector<ulong> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+
+        /// Scale : Adjust exponent
+
+        /// <summary>
+        /// svfloat32_t svscale[_f32]_m(svbool_t pg, svfloat32_t op1, svint32_t op2)
+        /// svfloat32_t svscale[_f32]_x(svbool_t pg, svfloat32_t op1, svint32_t op2)
+        /// svfloat32_t svscale[_f32]_z(svbool_t pg, svfloat32_t op1, svint32_t op2)
+        /// </summary>
+        public static unsafe Vector<float> Scale(Vector<float> left, Vector<int> right) => Scale(left, right);
+
+        /// <summary>
+        /// svfloat64_t svscale[_f64]_m(svbool_t pg, svfloat64_t op1, svint64_t op2)
+        /// svfloat64_t svscale[_f64]_x(svbool_t pg, svfloat64_t op1, svint64_t op2)
+        /// svfloat64_t svscale[_f64]_z(svbool_t pg, svfloat64_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<double> Scale(Vector<double> left, Vector<long> right) => Scale(left, right);
+
+
+        /// Scatter : Non-truncating store
+
+        /// <summary>
+        /// void svst1_scatter_[s32]offset[_s32](svbool_t pg, int32_t *base, svint32_t offsets, svint32_t data)
+        /// void svst1_scatter_[s32]index[_s32](svbool_t pg, int32_t *base, svint32_t indices, svint32_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<int> mask, int* address, Vector<int> indices, Vector<int> data) => Scatter(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<int> mask, Vector<uint> addresses, Vector<int> data) => Scatter(mask, addresses, data);
+
+        /// <summary>
+        /// void svst1_scatter_[u32]offset[_s32](svbool_t pg, int32_t *base, svuint32_t offsets, svint32_t data)
+        /// void svst1_scatter_[u32]index[_s32](svbool_t pg, int32_t *base, svuint32_t indices, svint32_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<int> mask, int* address, Vector<uint> indices, Vector<int> data) => Scatter(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1_scatter_[s64]offset[_s64](svbool_t pg, int64_t *base, svint64_t offsets, svint64_t data)
+        /// void svst1_scatter_[s64]index[_s64](svbool_t pg, int64_t *base, svint64_t indices, svint64_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<long> mask, long* address, Vector<long> indices, Vector<long> data) => Scatter(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<long> mask, Vector<ulong> addresses, Vector<long> data) => Scatter(mask, addresses, data);
+
+        /// <summary>
+        /// void svst1_scatter_[u64]offset[_s64](svbool_t pg, int64_t *base, svuint64_t offsets, svint64_t data)
+        /// void svst1_scatter_[u64]index[_s64](svbool_t pg, int64_t *base, svuint64_t indices, svint64_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<long> mask, long* address, Vector<ulong> indices, Vector<long> data) => Scatter(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1_scatter_[s32]offset[_u32](svbool_t pg, uint32_t *base, svint32_t offsets, svuint32_t data)
+        /// void svst1_scatter_[s32]index[_u32](svbool_t pg, uint32_t *base, svint32_t indices, svuint32_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<uint> mask, uint* address, Vector<int> indices, Vector<uint> data) => Scatter(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<uint> mask, Vector<uint> addresses, Vector<uint> data) => Scatter(mask, addresses, data);
+
+        /// <summary>
+        /// void svst1_scatter_[u32]offset[_u32](svbool_t pg, uint32_t *base, svuint32_t offsets, svuint32_t data)
+        /// void svst1_scatter_[u32]index[_u32](svbool_t pg, uint32_t *base, svuint32_t indices, svuint32_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<uint> mask, uint* address, Vector<uint> indices, Vector<uint> data) => Scatter(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1_scatter_[s64]offset[_u64](svbool_t pg, uint64_t *base, svint64_t offsets, svuint64_t data)
+        /// void svst1_scatter_[s64]index[_u64](svbool_t pg, uint64_t *base, svint64_t indices, svuint64_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<ulong> mask, ulong* address, Vector<long> indices, Vector<ulong> data) => Scatter(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<ulong> mask, Vector<ulong> addresses, Vector<ulong> data) => Scatter(mask, addresses, data);
+
+        /// <summary>
+        /// void svst1_scatter_[u64]offset[_u64](svbool_t pg, uint64_t *base, svuint64_t offsets, svuint64_t data)
+        /// void svst1_scatter_[u64]index[_u64](svbool_t pg, uint64_t *base, svuint64_t indices, svuint64_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<ulong> mask, ulong* address, Vector<ulong> indices, Vector<ulong> data) => Scatter(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1_scatter_[s32]offset[_f32](svbool_t pg, float32_t *base, svint32_t offsets, svfloat32_t data)
+        /// void svst1_scatter_[s32]index[_f32](svbool_t pg, float32_t *base, svint32_t indices, svfloat32_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<float> mask, float* address, Vector<int> indices, Vector<float> data) => Scatter(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1_scatter[_u32base_f32](svbool_t pg, svuint32_t bases, svfloat32_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<float> mask, Vector<uint> addresses, Vector<float> data) => Scatter(mask, addresses, data);
+
+        /// <summary>
+        /// void svst1_scatter_[u32]offset[_f32](svbool_t pg, float32_t *base, svuint32_t offsets, svfloat32_t data)
+        /// void svst1_scatter_[u32]index[_f32](svbool_t pg, float32_t *base, svuint32_t indices, svfloat32_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<float> mask, float* address, Vector<uint> indices, Vector<float> data) => Scatter(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1_scatter_[s64]offset[_f64](svbool_t pg, float64_t *base, svint64_t offsets, svfloat64_t data)
+        /// void svst1_scatter_[s64]index[_f64](svbool_t pg, float64_t *base, svint64_t indices, svfloat64_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<double> mask, double* address, Vector<long> indices, Vector<double> data) => Scatter(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1_scatter[_u64base_f64](svbool_t pg, svuint64_t bases, svfloat64_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<double> mask, Vector<ulong> addresses, Vector<double> data) => Scatter(mask, addresses, data);
+
+        /// <summary>
+        /// void svst1_scatter_[u64]offset[_f64](svbool_t pg, float64_t *base, svuint64_t offsets, svfloat64_t data)
+        /// void svst1_scatter_[u64]index[_f64](svbool_t pg, float64_t *base, svuint64_t indices, svfloat64_t data)
+        /// </summary>
+        public static unsafe void Scatter(Vector<double> mask, double* address, Vector<ulong> indices, Vector<double> data) => Scatter(mask, address, indices, data);
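+        // Illustrative usage sketch, not part of the proposed surface: storing one
+        // double per active lane through 64-bit indices. `buffer`, `indices` and
+        // `data` are hypothetical locals; in the svst1_scatter index forms listed
+        // above, indices are element numbers scaled by the element size in hardware.
+        //
+        //     double* buffer = ...;                        // caller-managed memory
+        //     Vector<double> mask = Sve.CreateTrueMaskDouble();
+        //     Sve.Scatter(mask, buffer, indices, data);    // buffer[indices[i]] = data[i]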
+
+
+        /// Scatter16BitNarrowing : Truncate to 16 bits and store
+
+        /// <summary>
+        /// void svst1h_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data)
+        /// </summary>
+        public static unsafe void Scatter16BitNarrowing(Vector<int> mask, Vector<uint> addresses, Vector<int> data) => Scatter16BitNarrowing(mask, addresses, data);
+
+        /// <summary>
+        /// void svst1h_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data)
+        /// </summary>
+        public static unsafe void Scatter16BitNarrowing(Vector<long> mask, Vector<ulong> addresses, Vector<long> data) => Scatter16BitNarrowing(mask, addresses, data);
+
+        /// <summary>
+        /// void svst1h_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data)
+        /// </summary>
+        public static unsafe void Scatter16BitNarrowing(Vector<uint> mask, Vector<uint> addresses, Vector<uint> data) => Scatter16BitNarrowing(mask, addresses, data);
+
+        /// <summary>
+        /// void svst1h_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data)
+        /// </summary>
+        public static unsafe void Scatter16BitNarrowing(Vector<ulong> mask, Vector<ulong> addresses, Vector<ulong> data) => Scatter16BitNarrowing(mask, addresses, data);
+
+
+        /// Scatter16BitWithByteOffsetsNarrowing : Truncate to 16 bits and store
+
+        /// <summary>
+        /// void svst1h_scatter_[s32]offset[_s32](svbool_t pg, int16_t *base, svint32_t offsets, svint32_t data)
+        /// </summary>
+        public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<int> mask, short* address, Vector<int> offsets, Vector<int> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+        /// <summary>
+        /// void svst1h_scatter_[u32]offset[_s32](svbool_t pg, int16_t *base, svuint32_t offsets, svint32_t data)
+        /// </summary>
+        public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<int> mask, short* address, Vector<uint> offsets, Vector<int> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+        /// <summary>
+        /// void svst1h_scatter_[s32]index[_s32](svbool_t pg, int16_t *base, svint32_t indices, svint32_t data)
+        /// </summary>
+        public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<int> mask, short* address, Vector<int> indices, Vector<int> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1h_scatter_[u32]index[_s32](svbool_t pg, int16_t *base, svuint32_t indices, svint32_t data)
+        /// </summary>
+        public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<int> mask, short* address, Vector<uint> indices, Vector<int> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1h_scatter_[s64]offset[_s64](svbool_t pg, int16_t *base, svint64_t offsets, svint64_t data)
+        /// </summary>
+        public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<long> mask, short* address, Vector<long> offsets, Vector<long> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+        /// <summary>
+        /// void svst1h_scatter_[u64]offset[_s64](svbool_t pg, int16_t *base, svuint64_t offsets, svint64_t data)
+        /// </summary>
+        public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<long> mask, short* address, Vector<ulong> offsets, Vector<long> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+        /// <summary>
+        /// void svst1h_scatter_[s64]index[_s64](svbool_t pg, int16_t *base, svint64_t indices, svint64_t data)
+        /// </summary>
+        public static unsafe void
Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svst1h_scatter_[u64]index[_s64](svbool_t pg, int16_t *base, svuint64_t indices, svint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svst1h_scatter_[s32]offset[_u32](svbool_t pg, uint16_t *base, svint32_t offsets, svuint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1h_scatter_[u32]offset[_u32](svbool_t pg, uint16_t *base, svuint32_t offsets, svuint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1h_scatter_[s32]index[_u32](svbool_t pg, uint16_t *base, svint32_t indices, svuint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svst1h_scatter_[u32]index[_u32](svbool_t pg, uint16_t *base, svuint32_t indices, svuint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svst1h_scatter_[s64]offset[_u64](svbool_t pg, uint16_t *base, svint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1h_scatter_[u64]offset[_u64](svbool_t pg, uint16_t *base, svuint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1h_scatter_[s64]index[_u64](svbool_t pg, uint16_t *base, svint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svst1h_scatter_[u64]index[_u64](svbool_t pg, uint16_t *base, svuint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + + /// Scatter32BitNarrowing : Truncate to 32 bits and store + + /// + /// void svst1w_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter32BitNarrowing(mask, addresses, data); + + /// + /// void svst1w_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data) => 
Scatter32BitNarrowing(mask, addresses, data); + + + /// Scatter32BitWithByteOffsetsNarrowing : Truncate to 32 bits and store + + /// + /// void svst1w_scatter_[s64]offset[_s64](svbool_t pg, int32_t *base, svint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1w_scatter_[u64]offset[_s64](svbool_t pg, int32_t *base, svuint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1w_scatter_[s64]index[_s64](svbool_t pg, int32_t *base, svint64_t indices, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svst1w_scatter_[u64]index[_s64](svbool_t pg, int32_t *base, svuint64_t indices, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svst1w_scatter_[s64]offset[_u64](svbool_t pg, uint32_t *base, svint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1w_scatter_[u64]offset[_u64](svbool_t pg, uint32_t *base, svuint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1w_scatter_[s64]index[_u64](svbool_t pg, uint32_t *base, svint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svst1w_scatter_[u64]index[_u64](svbool_t pg, uint32_t *base, svuint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data); + + + /// Scatter8BitNarrowing : Truncate to 8 bits and store + + /// + /// void svst1b_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter8BitNarrowing(mask, addresses, data); + + /// + /// void svst1b_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter8BitNarrowing(mask, addresses, data); + + /// + /// void svst1b_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter8BitNarrowing(mask, addresses, data); + + /// + /// void svst1b_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// + public static unsafe 
void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter8BitNarrowing(mask, addresses, data); + + + /// Scatter8BitWithByteOffsetsNarrowing : Truncate to 8 bits and store + + /// + /// void svst1b_scatter_[s32]offset[_s32](svbool_t pg, int8_t *base, svint32_t offsets, svint32_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1b_scatter_[u32]offset[_s32](svbool_t pg, int8_t *base, svuint32_t offsets, svint32_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1b_scatter_[s64]offset[_s64](svbool_t pg, int8_t *base, svint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1b_scatter_[u64]offset[_s64](svbool_t pg, int8_t *base, svuint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1b_scatter_[s32]offset[_u32](svbool_t pg, uint8_t *base, svint32_t offsets, svuint32_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1b_scatter_[u32]offset[_u32](svbool_t pg, uint8_t *base, svuint32_t offsets, svuint32_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1b_scatter_[s64]offset[_u64](svbool_t pg, uint8_t *base, svint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1b_scatter_[u64]offset[_u64](svbool_t pg, uint8_t *base, svuint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + + /// SetFfr : Write to the first-fault register + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) => SetFfr(value); + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) => SetFfr(value); + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) => SetFfr(value); + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) => SetFfr(value); + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) => SetFfr(value); + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) => SetFfr(value); + + /// + /// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) => SetFfr(value); + + /// + 
/// void svwrffr(svbool_t op) + /// + public static unsafe void SetFfr(Vector value) => SetFfr(value); + + + /// ShiftLeftLogical : Logical shift left + + /// + /// svint8_t svlsl[_s8]_m(svbool_t pg, svint8_t op1, svuint8_t op2) + /// svint8_t svlsl[_s8]_x(svbool_t pg, svint8_t op1, svuint8_t op2) + /// svint8_t svlsl[_s8]_z(svbool_t pg, svint8_t op1, svuint8_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svint8_t svlsl_wide[_s8]_m(svbool_t pg, svint8_t op1, svuint64_t op2) + /// svint8_t svlsl_wide[_s8]_x(svbool_t pg, svint8_t op1, svuint64_t op2) + /// svint8_t svlsl_wide[_s8]_z(svbool_t pg, svint8_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svint16_t svlsl[_s16]_m(svbool_t pg, svint16_t op1, svuint16_t op2) + /// svint16_t svlsl[_s16]_x(svbool_t pg, svint16_t op1, svuint16_t op2) + /// svint16_t svlsl[_s16]_z(svbool_t pg, svint16_t op1, svuint16_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svint16_t svlsl_wide[_s16]_m(svbool_t pg, svint16_t op1, svuint64_t op2) + /// svint16_t svlsl_wide[_s16]_x(svbool_t pg, svint16_t op1, svuint64_t op2) + /// svint16_t svlsl_wide[_s16]_z(svbool_t pg, svint16_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svint32_t svlsl[_s32]_m(svbool_t pg, svint32_t op1, svuint32_t op2) + /// svint32_t svlsl[_s32]_x(svbool_t pg, svint32_t op1, svuint32_t op2) + /// svint32_t svlsl[_s32]_z(svbool_t pg, svint32_t op1, svuint32_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svint32_t svlsl_wide[_s32]_m(svbool_t pg, svint32_t op1, svuint64_t op2) + /// svint32_t svlsl_wide[_s32]_x(svbool_t pg, svint32_t op1, svuint64_t op2) + /// svint32_t svlsl_wide[_s32]_z(svbool_t pg, svint32_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svint64_t svlsl[_s64]_m(svbool_t pg, svint64_t op1, svuint64_t op2) + /// svint64_t svlsl[_s64]_x(svbool_t pg, svint64_t op1, svuint64_t op2) + /// svint64_t svlsl[_s64]_z(svbool_t pg, svint64_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svuint8_t svlsl[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svlsl[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svlsl[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svuint8_t svlsl_wide[_u8]_m(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// svuint8_t svlsl_wide[_u8]_x(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// svuint8_t svlsl_wide[_u8]_z(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svuint16_t svlsl[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svlsl[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svlsl[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector 
ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svuint16_t svlsl_wide[_u16]_m(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// svuint16_t svlsl_wide[_u16]_x(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// svuint16_t svlsl_wide[_u16]_z(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svuint32_t svlsl[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svlsl[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svlsl[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svuint32_t svlsl_wide[_u32]_m(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// svuint32_t svlsl_wide[_u32]_x(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// svuint32_t svlsl_wide[_u32]_z(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svuint64_t svlsl[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svlsl[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svlsl[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + + /// ShiftRightArithmetic : Arithmetic shift right + + /// + /// svint8_t svasr[_s8]_m(svbool_t pg, svint8_t op1, svuint8_t op2) + /// svint8_t svasr[_s8]_x(svbool_t pg, svint8_t op1, svuint8_t op2) + /// svint8_t svasr[_s8]_z(svbool_t pg, svint8_t op1, svuint8_t op2) + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) => ShiftRightArithmetic(left, right); + + /// + /// svint8_t svasr_wide[_s8]_m(svbool_t pg, svint8_t op1, svuint64_t op2) + /// svint8_t svasr_wide[_s8]_x(svbool_t pg, svint8_t op1, svuint64_t op2) + /// svint8_t svasr_wide[_s8]_z(svbool_t pg, svint8_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) => ShiftRightArithmetic(left, right); + + /// + /// svint16_t svasr[_s16]_m(svbool_t pg, svint16_t op1, svuint16_t op2) + /// svint16_t svasr[_s16]_x(svbool_t pg, svint16_t op1, svuint16_t op2) + /// svint16_t svasr[_s16]_z(svbool_t pg, svint16_t op1, svuint16_t op2) + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) => ShiftRightArithmetic(left, right); + + /// + /// svint16_t svasr_wide[_s16]_m(svbool_t pg, svint16_t op1, svuint64_t op2) + /// svint16_t svasr_wide[_s16]_x(svbool_t pg, svint16_t op1, svuint64_t op2) + /// svint16_t svasr_wide[_s16]_z(svbool_t pg, svint16_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) => ShiftRightArithmetic(left, right); + + /// + /// svint32_t svasr[_s32]_m(svbool_t pg, svint32_t op1, svuint32_t op2) + /// svint32_t svasr[_s32]_x(svbool_t pg, svint32_t op1, svuint32_t op2) + /// svint32_t svasr[_s32]_z(svbool_t pg, svint32_t op1, svuint32_t op2) + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) => ShiftRightArithmetic(left, right); + + /// + /// svint32_t svasr_wide[_s32]_m(svbool_t pg, svint32_t op1, svuint64_t op2) + /// svint32_t svasr_wide[_s32]_x(svbool_t pg, svint32_t op1, svuint64_t op2) + /// svint32_t svasr_wide[_s32]_z(svbool_t pg, 
svint32_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) => ShiftRightArithmetic(left, right); + + /// + /// svint64_t svasr[_s64]_m(svbool_t pg, svint64_t op1, svuint64_t op2) + /// svint64_t svasr[_s64]_x(svbool_t pg, svint64_t op1, svuint64_t op2) + /// svint64_t svasr[_s64]_z(svbool_t pg, svint64_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) => ShiftRightArithmetic(left, right); + + + /// ShiftRightArithmeticForDivide : Arithmetic shift right for divide by immediate + + /// + /// svint8_t svasrd[_n_s8]_m(svbool_t pg, svint8_t op1, uint64_t imm2) + /// svint8_t svasrd[_n_s8]_x(svbool_t pg, svint8_t op1, uint64_t imm2) + /// svint8_t svasrd[_n_s8]_z(svbool_t pg, svint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticForDivide(Vector value, [ConstantExpected] byte control) => ShiftRightArithmeticForDivide(value, control); + + /// + /// svint16_t svasrd[_n_s16]_m(svbool_t pg, svint16_t op1, uint64_t imm2) + /// svint16_t svasrd[_n_s16]_x(svbool_t pg, svint16_t op1, uint64_t imm2) + /// svint16_t svasrd[_n_s16]_z(svbool_t pg, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticForDivide(Vector value, [ConstantExpected] byte control) => ShiftRightArithmeticForDivide(value, control); + + /// + /// svint32_t svasrd[_n_s32]_m(svbool_t pg, svint32_t op1, uint64_t imm2) + /// svint32_t svasrd[_n_s32]_x(svbool_t pg, svint32_t op1, uint64_t imm2) + /// svint32_t svasrd[_n_s32]_z(svbool_t pg, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticForDivide(Vector value, [ConstantExpected] byte control) => ShiftRightArithmeticForDivide(value, control); + + /// + /// svint64_t svasrd[_n_s64]_m(svbool_t pg, svint64_t op1, uint64_t imm2) + /// svint64_t svasrd[_n_s64]_x(svbool_t pg, svint64_t op1, uint64_t imm2) + /// svint64_t svasrd[_n_s64]_z(svbool_t pg, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticForDivide(Vector value, [ConstantExpected] byte control) => ShiftRightArithmeticForDivide(value, control); + + + /// ShiftRightLogical : Logical shift right + + /// + /// svuint8_t svlsr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svlsr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svlsr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) => ShiftRightLogical(left, right); + + /// + /// svuint8_t svlsr_wide[_u8]_m(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// svuint8_t svlsr_wide[_u8]_x(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// svuint8_t svlsr_wide[_u8]_z(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) => ShiftRightLogical(left, right); + + /// + /// svuint16_t svlsr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svlsr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svlsr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) => ShiftRightLogical(left, right); + + /// + /// svuint16_t svlsr_wide[_u16]_m(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// svuint16_t svlsr_wide[_u16]_x(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// svuint16_t svlsr_wide[_u16]_z(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// + public static 
unsafe Vector ShiftRightLogical(Vector left, Vector right) => ShiftRightLogical(left, right); + + /// + /// svuint32_t svlsr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svlsr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svlsr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) => ShiftRightLogical(left, right); + + /// + /// svuint32_t svlsr_wide[_u32]_m(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// svuint32_t svlsr_wide[_u32]_x(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// svuint32_t svlsr_wide[_u32]_z(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) => ShiftRightLogical(left, right); + + /// + /// svuint64_t svlsr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svlsr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svlsr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) => ShiftRightLogical(left, right); + + + /// SignExtend16 : Sign-extend the low 16 bits + + /// + /// svint32_t svexth[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// svint32_t svexth[_s32]_x(svbool_t pg, svint32_t op) + /// svint32_t svexth[_s32]_z(svbool_t pg, svint32_t op) + /// + public static unsafe Vector SignExtend16(Vector value) => SignExtend16(value); + + /// + /// svint64_t svexth[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// svint64_t svexth[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svexth[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector SignExtend16(Vector value) => SignExtend16(value); + + + /// SignExtend32 : Sign-extend the low 32 bits + + /// + /// svint64_t svextw[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// svint64_t svextw[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svextw[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector SignExtend32(Vector value) => SignExtend32(value); + + + /// SignExtend8 : Sign-extend the low 8 bits + + /// + /// svint16_t svextb[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// svint16_t svextb[_s16]_x(svbool_t pg, svint16_t op) + /// svint16_t svextb[_s16]_z(svbool_t pg, svint16_t op) + /// + public static unsafe Vector SignExtend8(Vector value) => SignExtend8(value); + + /// + /// svint32_t svextb[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// svint32_t svextb[_s32]_x(svbool_t pg, svint32_t op) + /// svint32_t svextb[_s32]_z(svbool_t pg, svint32_t op) + /// + public static unsafe Vector SignExtend8(Vector value) => SignExtend8(value); + + /// + /// svint64_t svextb[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// svint64_t svextb[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svextb[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector SignExtend8(Vector value) => SignExtend8(value); + + + /// SignExtendWideningLower : Unpack and extend low half + + /// + /// svint16_t svunpklo[_s16](svint8_t op) + /// + public static unsafe Vector SignExtendWideningLower(Vector value) => SignExtendWideningLower(value); + + /// + /// svint32_t svunpklo[_s32](svint16_t op) + /// + public static unsafe Vector SignExtendWideningLower(Vector value) => SignExtendWideningLower(value); + + /// + /// svint64_t svunpklo[_s64](svint32_t op) + /// + public static unsafe Vector 
SignExtendWideningLower(Vector value) => SignExtendWideningLower(value); + + + /// SignExtendWideningUpper : Unpack and extend high half + + /// + /// svint16_t svunpkhi[_s16](svint8_t op) + /// + public static unsafe Vector SignExtendWideningUpper(Vector value) => SignExtendWideningUpper(value); + + /// + /// svint32_t svunpkhi[_s32](svint16_t op) + /// + public static unsafe Vector SignExtendWideningUpper(Vector value) => SignExtendWideningUpper(value); + + /// + /// svint64_t svunpkhi[_s64](svint32_t op) + /// + public static unsafe Vector SignExtendWideningUpper(Vector value) => SignExtendWideningUpper(value); + + + /// Splice : Splice two vectors under predicate control + + /// + /// svint8_t svsplice[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svint16_t svsplice[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svint32_t svsplice[_s32](svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svint64_t svsplice[_s64](svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svuint8_t svsplice[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svuint16_t svsplice[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svuint32_t svsplice[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svuint64_t svsplice[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svfloat32_t svsplice[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svfloat64_t svsplice[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + + /// Sqrt : Square root + + /// + /// svfloat32_t svsqrt[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// svfloat32_t svsqrt[_f32]_x(svbool_t pg, svfloat32_t op) + /// svfloat32_t svsqrt[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector Sqrt(Vector value) => Sqrt(value); + + /// + /// svfloat64_t svsqrt[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// svfloat64_t svsqrt[_f64]_x(svbool_t pg, svfloat64_t op) + /// svfloat64_t svsqrt[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector Sqrt(Vector value) => Sqrt(value); + + + /// Store : Non-truncating store + + /// + /// void svst1[_s8](svbool_t pg, int8_t *base, svint8_t data) + /// + public static unsafe void Store(Vector mask, sbyte* address, Vector data) => Store(mask, address, data); + + /// + /// void svst2[_s8](svbool_t pg, int8_t *base, svint8x2_t data) + 
/// </summary>
+        public static unsafe void Store(Vector<sbyte> mask, sbyte* address, (Vector<sbyte> Value1, Vector<sbyte> Value2) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst3[_s8](svbool_t pg, int8_t *base, svint8x3_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<sbyte> mask, sbyte* address, (Vector<sbyte> Value1, Vector<sbyte> Value2, Vector<sbyte> Value3) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst4[_s8](svbool_t pg, int8_t *base, svint8x4_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<sbyte> mask, sbyte* address, (Vector<sbyte> Value1, Vector<sbyte> Value2, Vector<sbyte> Value3, Vector<sbyte> Value4) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst1[_s16](svbool_t pg, int16_t *base, svint16_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<short> mask, short* address, Vector<short> data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst2[_s16](svbool_t pg, int16_t *base, svint16x2_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<short> mask, short* address, (Vector<short> Value1, Vector<short> Value2) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst3[_s16](svbool_t pg, int16_t *base, svint16x3_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<short> mask, short* address, (Vector<short> Value1, Vector<short> Value2, Vector<short> Value3) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst4[_s16](svbool_t pg, int16_t *base, svint16x4_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<short> mask, short* address, (Vector<short> Value1, Vector<short> Value2, Vector<short> Value3, Vector<short> Value4) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst1[_s32](svbool_t pg, int32_t *base, svint32_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<int> mask, int* address, Vector<int> data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst2[_s32](svbool_t pg, int32_t *base, svint32x2_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<int> mask, int* address, (Vector<int> Value1, Vector<int> Value2) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst3[_s32](svbool_t pg, int32_t *base, svint32x3_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<int> mask, int* address, (Vector<int> Value1, Vector<int> Value2, Vector<int> Value3) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst4[_s32](svbool_t pg, int32_t *base, svint32x4_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<int> mask, int* address, (Vector<int> Value1, Vector<int> Value2, Vector<int> Value3, Vector<int> Value4) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst1[_s64](svbool_t pg, int64_t *base, svint64_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<long> mask, long* address, Vector<long> data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst2[_s64](svbool_t pg, int64_t *base, svint64x2_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<long> mask, long* address, (Vector<long> Value1, Vector<long> Value2) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst3[_s64](svbool_t pg, int64_t *base, svint64x3_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<long> mask, long* address, (Vector<long> Value1, Vector<long> Value2, Vector<long> Value3) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst4[_s64](svbool_t pg, int64_t *base, svint64x4_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<long> mask, long* address, (Vector<long> Value1, Vector<long> Value2, Vector<long> Value3, Vector<long> Value4) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst1[_u8](svbool_t pg, uint8_t *base, svuint8_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<byte> mask, byte* address, Vector<byte> data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst2[_u8](svbool_t pg, uint8_t *base, svuint8x2_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<byte> mask, byte* address, (Vector<byte> Value1, Vector<byte> Value2) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst3[_u8](svbool_t pg, uint8_t *base, svuint8x3_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<byte> mask, byte* address, (Vector<byte> Value1, Vector<byte> Value2, Vector<byte> Value3) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst4[_u8](svbool_t pg, uint8_t *base, svuint8x4_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<byte> mask, byte* address, (Vector<byte> Value1, Vector<byte> Value2, Vector<byte> Value3, Vector<byte> Value4) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst1[_u16](svbool_t pg, uint16_t *base, svuint16_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<ushort> mask, ushort* address, Vector<ushort> data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst2[_u16](svbool_t pg, uint16_t *base, svuint16x2_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<ushort> mask, ushort* address, (Vector<ushort> Value1, Vector<ushort> Value2) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst3[_u16](svbool_t pg, uint16_t *base, svuint16x3_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<ushort> mask, ushort* address, (Vector<ushort> Value1, Vector<ushort> Value2, Vector<ushort> Value3) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst4[_u16](svbool_t pg, uint16_t *base, svuint16x4_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<ushort> mask, ushort* address, (Vector<ushort> Value1, Vector<ushort> Value2, Vector<ushort> Value3, Vector<ushort> Value4) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst1[_u32](svbool_t pg, uint32_t *base, svuint32_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<uint> mask, uint* address, Vector<uint> data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst2[_u32](svbool_t pg, uint32_t *base, svuint32x2_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<uint> mask, uint* address, (Vector<uint> Value1, Vector<uint> Value2) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst3[_u32](svbool_t pg, uint32_t *base, svuint32x3_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<uint> mask, uint* address, (Vector<uint> Value1, Vector<uint> Value2, Vector<uint> Value3) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst4[_u32](svbool_t pg, uint32_t *base, svuint32x4_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<uint> mask, uint* address, (Vector<uint> Value1, Vector<uint> Value2, Vector<uint> Value3, Vector<uint> Value4) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst1[_u64](svbool_t pg, uint64_t *base, svuint64_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<ulong> mask, ulong* address, Vector<ulong> data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst2[_u64](svbool_t pg, uint64_t *base, svuint64x2_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<ulong> mask, ulong* address, (Vector<ulong> Value1, Vector<ulong> Value2) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst3[_u64](svbool_t pg, uint64_t *base, svuint64x3_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<ulong> mask, ulong* address, (Vector<ulong> Value1, Vector<ulong> Value2, Vector<ulong> Value3) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst4[_u64](svbool_t pg, uint64_t *base, svuint64x4_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<ulong> mask, ulong* address, (Vector<ulong> Value1, Vector<ulong> Value2, Vector<ulong> Value3, Vector<ulong> Value4) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst1[_f32](svbool_t pg, float32_t *base, svfloat32_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<float> mask, float* address, Vector<float> data) => Store(mask, address, data);
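+        // Illustrative usage sketch, not part of the proposed surface: the tuple
+        // overloads in this group map to the multi-vector svst2/svst3/svst4 forms,
+        // which interleave their operands into memory structure-by-structure.
+        // `dest`, `re` and `im` are hypothetical locals:
+        //
+        //     float* dest = ...;
+        //     Sve.Store(Sve.CreateTrueMaskSingle(), dest, (re, im));
+        //     // dest now holds re[0], im[0], re[1], im[1], ...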
+
+        /// <summary>
+        /// void svst2[_f32](svbool_t pg, float32_t *base, svfloat32x2_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<float> mask, float* address, (Vector<float> Value1, Vector<float> Value2) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst3[_f32](svbool_t pg, float32_t *base, svfloat32x3_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<float> mask, float* address, (Vector<float> Value1, Vector<float> Value2, Vector<float> Value3) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst4[_f32](svbool_t pg, float32_t *base, svfloat32x4_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<float> mask, float* address, (Vector<float> Value1, Vector<float> Value2, Vector<float> Value3, Vector<float> Value4) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst1[_f64](svbool_t pg, float64_t *base, svfloat64_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<double> mask, double* address, Vector<double> data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst2[_f64](svbool_t pg, float64_t *base, svfloat64x2_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<double> mask, double* address, (Vector<double> Value1, Vector<double> Value2) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst3[_f64](svbool_t pg, float64_t *base, svfloat64x3_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<double> mask, double* address, (Vector<double> Value1, Vector<double> Value2, Vector<double> Value3) data) => Store(mask, address, data);
+
+        /// <summary>
+        /// void svst4[_f64](svbool_t pg, float64_t *base, svfloat64x4_t data)
+        /// </summary>
+        public static unsafe void Store(Vector<double> mask, double* address, (Vector<double> Value1, Vector<double> Value2, Vector<double> Value3, Vector<double> Value4) data) => Store(mask, address, data);
+
+
+        /// StoreNarrowing : Truncate and store
+
+        /// <summary>
+        /// void svst1b[_s16](svbool_t pg, int8_t *base, svint16_t data)
+        /// </summary>
+        public static unsafe void StoreNarrowing(Vector<short> mask, sbyte* address, Vector<short> data) => StoreNarrowing(mask, address, data);
+
+        /// <summary>
+        /// void svst1b[_s32](svbool_t pg, int8_t *base, svint32_t data)
+        /// </summary>
+        public static unsafe void StoreNarrowing(Vector<int> mask, sbyte* address, Vector<int> data) => StoreNarrowing(mask, address, data);
+
+        /// <summary>
+        /// void svst1h[_s32](svbool_t pg, int16_t *base, svint32_t data)
+        /// </summary>
+        public static unsafe void StoreNarrowing(Vector<int> mask, short* address, Vector<int> data) => StoreNarrowing(mask, address, data);
+
+        /// <summary>
+        /// void svst1b[_s64](svbool_t pg, int8_t *base, svint64_t data)
+        /// </summary>
+        public static unsafe void StoreNarrowing(Vector<long> mask, sbyte* address, Vector<long> data) => StoreNarrowing(mask, address, data);
+
+        /// <summary>
+        /// void svst1h[_s64](svbool_t pg, int16_t *base, svint64_t data)
+        /// </summary>
+        public static unsafe void StoreNarrowing(Vector<long> mask, short* address, Vector<long> data) => StoreNarrowing(mask, address, data);
+
+        /// <summary>
+        /// void svst1w[_s64](svbool_t pg, int32_t *base, svint64_t data)
+        /// </summary>
+        public static unsafe void StoreNarrowing(Vector<long> mask, int* address, Vector<long> data) => StoreNarrowing(mask, address, data);
+
+        /// <summary>
+        /// void svst1b[_u16](svbool_t pg, uint8_t *base, svuint16_t data)
+        /// </summary>
+        public static unsafe void StoreNarrowing(Vector<ushort> mask, byte* address, Vector<ushort> data) => StoreNarrowing(mask, address, data);
+
+        /// <summary>
+        /// void svst1b[_u32](svbool_t pg, uint8_t *base, svuint32_t data)
+        /// </summary>
+        public static unsafe void StoreNarrowing(Vector<uint> mask, byte* address, Vector<uint> data) => StoreNarrowing(mask, address, data);
+
+        /// <summary>
+        /// void svst1h[_u32](svbool_t pg, uint16_t *base, svuint32_t data)
+        /// </summary>
+        public static unsafe void StoreNarrowing(Vector<uint> mask, ushort* address, Vector<uint> data) => StoreNarrowing(mask, address, data);
+
+        /// <summary>
+        /// void svst1b[_u64](svbool_t pg, uint8_t *base, svuint64_t data)
+        /// </summary>
+        public static unsafe void
StoreNarrowing(Vector mask, byte* address, Vector data) => StoreNarrowing(mask, address, data); + + /// + /// void svst1h[_u64](svbool_t pg, uint16_t *base, svuint64_t data) + /// + public static unsafe void StoreNarrowing(Vector mask, ushort* address, Vector data) => StoreNarrowing(mask, address, data); + + /// + /// void svst1w[_u64](svbool_t pg, uint32_t *base, svuint64_t data) + /// + public static unsafe void StoreNarrowing(Vector mask, uint* address, Vector data) => StoreNarrowing(mask, address, data); + + + /// StoreNonTemporal : Non-truncating store, non-temporal + + /// + /// void svstnt1[_s8](svbool_t pg, int8_t *base, svint8_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, sbyte* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_s16](svbool_t pg, int16_t *base, svint16_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, short* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_s32](svbool_t pg, int32_t *base, svint32_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, int* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_s64](svbool_t pg, int64_t *base, svint64_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, long* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_u8](svbool_t pg, uint8_t *base, svuint8_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, byte* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_u16](svbool_t pg, uint16_t *base, svuint16_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, ushort* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_u32](svbool_t pg, uint32_t *base, svuint32_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, uint* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_u64](svbool_t pg, uint64_t *base, svuint64_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, ulong* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_f32](svbool_t pg, float32_t *base, svfloat32_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, float* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_f64](svbool_t pg, float64_t *base, svfloat64_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, double* address, Vector data) => StoreNonTemporal(mask, address, data); + + + /// Subtract : Subtract + + /// + /// svint8_t svsub[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svsub[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svsub[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svint16_t svsub[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svsub[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svsub[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svint32_t svsub[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svsub[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t 
svsub[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svint64_t svsub[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svsub[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svsub[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svuint8_t svsub[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svsub[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svsub[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svuint16_t svsub[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svsub[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svsub[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svuint32_t svsub[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svsub[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svsub[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svuint64_t svsub[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svsub[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svsub[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svfloat32_t svsub[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svsub[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svsub[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svfloat64_t svsub[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svsub[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svsub[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + + + /// SubtractSaturate : Saturating subtract + + /// + /// svint8_t svqsub[_s8](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svint16_t svqsub[_s16](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svint32_t svqsub[_s32](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svint64_t svqsub[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svuint8_t svqsub[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svuint16_t svqsub[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector 
/// <summary> + /// svuint32_t svqsub[_u32](svuint32_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<uint> SubtractSaturate(Vector<uint> left, Vector<uint> right) => SubtractSaturate(left, right); + + /// <summary> + /// svuint64_t svqsub[_u64](svuint64_t op1, svuint64_t op2) + /// </summary> + public static unsafe Vector<ulong> SubtractSaturate(Vector<ulong> left, Vector<ulong> right) => SubtractSaturate(left, right); + + + /// TestAnyTrue : Test whether any active element is true + + /// <summary> + /// bool svptest_any(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestAnyTrue(Vector<sbyte> leftMask, Vector<sbyte> rightMask) => TestAnyTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_any(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestAnyTrue(Vector<short> leftMask, Vector<short> rightMask) => TestAnyTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_any(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestAnyTrue(Vector<int> leftMask, Vector<int> rightMask) => TestAnyTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_any(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestAnyTrue(Vector<long> leftMask, Vector<long> rightMask) => TestAnyTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_any(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestAnyTrue(Vector<byte> leftMask, Vector<byte> rightMask) => TestAnyTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_any(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestAnyTrue(Vector<ushort> leftMask, Vector<ushort> rightMask) => TestAnyTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_any(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestAnyTrue(Vector<uint> leftMask, Vector<uint> rightMask) => TestAnyTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_any(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestAnyTrue(Vector<ulong> leftMask, Vector<ulong> rightMask) => TestAnyTrue(leftMask, rightMask); + + + /// TestFirstTrue : Test whether the first active element is true + + /// <summary> + /// bool svptest_first(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestFirstTrue(Vector<sbyte> leftMask, Vector<sbyte> rightMask) => TestFirstTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_first(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestFirstTrue(Vector<short> leftMask, Vector<short> rightMask) => TestFirstTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_first(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestFirstTrue(Vector<int> leftMask, Vector<int> rightMask) => TestFirstTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_first(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestFirstTrue(Vector<long> leftMask, Vector<long> rightMask) => TestFirstTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_first(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestFirstTrue(Vector<byte> leftMask, Vector<byte> rightMask) => TestFirstTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_first(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestFirstTrue(Vector<ushort> leftMask, Vector<ushort> rightMask) => TestFirstTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_first(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestFirstTrue(Vector<uint> leftMask, Vector<uint> rightMask) => TestFirstTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_first(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestFirstTrue(Vector<ulong> leftMask, Vector<ulong> rightMask) => TestFirstTrue(leftMask, rightMask);
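All overloads of TestAnyTrue and TestFirstTrue map to the same svptest_any/svptest_first check; the per-element-type variants exist only so mask operands keep their Vector<T> typing. A sketch of driving a scalar branch from a vector comparison (editor's illustration; CreateTrueMaskInt32 and CompareGreaterThan are assumed from other sections of this proposal):

    // Illustrative sketch only; true when any lane of 'values' exceeds 'limit'.
    static bool AnyLaneExceeds(Vector<int> values, Vector<int> limit)
    {
        Vector<int> pg  = Sve.CreateTrueMaskInt32();             // svptrue
        Vector<int> cmp = Sve.CompareGreaterThan(values, limit); // svcmpgt
        return Sve.TestAnyTrue(pg, cmp);                         // svptest_any
    }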
/// TestLastTrue : Test whether the last active element is true + + /// <summary> + /// bool svptest_last(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestLastTrue(Vector<sbyte> leftMask, Vector<sbyte> rightMask) => TestLastTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_last(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestLastTrue(Vector<short> leftMask, Vector<short> rightMask) => TestLastTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_last(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestLastTrue(Vector<int> leftMask, Vector<int> rightMask) => TestLastTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_last(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestLastTrue(Vector<long> leftMask, Vector<long> rightMask) => TestLastTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_last(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestLastTrue(Vector<byte> leftMask, Vector<byte> rightMask) => TestLastTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_last(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestLastTrue(Vector<ushort> leftMask, Vector<ushort> rightMask) => TestLastTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_last(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestLastTrue(Vector<uint> leftMask, Vector<uint> rightMask) => TestLastTrue(leftMask, rightMask); + + /// <summary> + /// bool svptest_last(svbool_t pg, svbool_t op) + /// </summary> + public static unsafe bool TestLastTrue(Vector<ulong> leftMask, Vector<ulong> rightMask) => TestLastTrue(leftMask, rightMask); + + + /// TransposeEven : Interleave even elements from two inputs + + /// <summary> + /// svint8_t svtrn1[_s8](svint8_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<sbyte> TransposeEven(Vector<sbyte> left, Vector<sbyte> right) => TransposeEven(left, right); + + /// <summary> + /// svint16_t svtrn1[_s16](svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<short> TransposeEven(Vector<short> left, Vector<short> right) => TransposeEven(left, right); + + /// <summary> + /// svint32_t svtrn1[_s32](svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<int> TransposeEven(Vector<int> left, Vector<int> right) => TransposeEven(left, right); + + /// <summary> + /// svint64_t svtrn1[_s64](svint64_t op1, svint64_t op2) + /// </summary> + public static unsafe Vector<long> TransposeEven(Vector<long> left, Vector<long> right) => TransposeEven(left, right); + + /// <summary> + /// svuint8_t svtrn1[_u8](svuint8_t op1, svuint8_t op2) + /// svbool_t svtrn1_b8(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<byte> TransposeEven(Vector<byte> left, Vector<byte> right) => TransposeEven(left, right); + + /// <summary> + /// svuint16_t svtrn1[_u16](svuint16_t op1, svuint16_t op2) + /// svbool_t svtrn1_b16(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<ushort> TransposeEven(Vector<ushort> left, Vector<ushort> right) => TransposeEven(left, right); + + /// <summary> + /// svuint32_t svtrn1[_u32](svuint32_t op1, svuint32_t op2) + /// svbool_t svtrn1_b32(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<uint> TransposeEven(Vector<uint> left, Vector<uint> right) => TransposeEven(left, right); + + /// <summary> + /// svuint64_t svtrn1[_u64](svuint64_t op1, svuint64_t op2) + /// svbool_t svtrn1_b64(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<ulong> TransposeEven(Vector<ulong> left, Vector<ulong> right) => TransposeEven(left, right); + + /// <summary> + /// svfloat32_t svtrn1[_f32](svfloat32_t op1, svfloat32_t op2) + /// </summary> + public static unsafe Vector<float> TransposeEven(Vector<float> left, Vector<float> right) => TransposeEven(left, right); + + /// <summary> + /// svfloat64_t svtrn1[_f64](svfloat64_t op1, svfloat64_t op2) + /// </summary> + public static unsafe Vector<double> TransposeEven(Vector<double> left, Vector<double> right) => TransposeEven(left, right);
/// TransposeOdd : Interleave odd elements from two inputs + + /// <summary> + /// svint8_t svtrn2[_s8](svint8_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<sbyte> TransposeOdd(Vector<sbyte> left, Vector<sbyte> right) => TransposeOdd(left, right); + + /// <summary> + /// svint16_t svtrn2[_s16](svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<short> TransposeOdd(Vector<short> left, Vector<short> right) => TransposeOdd(left, right); + + /// <summary> + /// svint32_t svtrn2[_s32](svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<int> TransposeOdd(Vector<int> left, Vector<int> right) => TransposeOdd(left, right); + + /// <summary> + /// svint64_t svtrn2[_s64](svint64_t op1, svint64_t op2) + /// </summary> + public static unsafe Vector<long> TransposeOdd(Vector<long> left, Vector<long> right) => TransposeOdd(left, right); + + /// <summary> + /// svuint8_t svtrn2[_u8](svuint8_t op1, svuint8_t op2) + /// svbool_t svtrn2_b8(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<byte> TransposeOdd(Vector<byte> left, Vector<byte> right) => TransposeOdd(left, right); + + /// <summary> + /// svuint16_t svtrn2[_u16](svuint16_t op1, svuint16_t op2) + /// svbool_t svtrn2_b16(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<ushort> TransposeOdd(Vector<ushort> left, Vector<ushort> right) => TransposeOdd(left, right); + + /// <summary> + /// svuint32_t svtrn2[_u32](svuint32_t op1, svuint32_t op2) + /// svbool_t svtrn2_b32(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<uint> TransposeOdd(Vector<uint> left, Vector<uint> right) => TransposeOdd(left, right); + + /// <summary> + /// svuint64_t svtrn2[_u64](svuint64_t op1, svuint64_t op2) + /// svbool_t svtrn2_b64(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<ulong> TransposeOdd(Vector<ulong> left, Vector<ulong> right) => TransposeOdd(left, right); + + /// <summary> + /// svfloat32_t svtrn2[_f32](svfloat32_t op1, svfloat32_t op2) + /// </summary> + public static unsafe Vector<float> TransposeOdd(Vector<float> left, Vector<float> right) => TransposeOdd(left, right); + + /// <summary> + /// svfloat64_t svtrn2[_f64](svfloat64_t op1, svfloat64_t op2) + /// </summary> + public static unsafe Vector<double> TransposeOdd(Vector<double> left, Vector<double> right) => TransposeOdd(left, right); + + + /// TrigonometricMultiplyAddCoefficient : Trigonometric multiply-add coefficient + + /// <summary> + /// svfloat32_t svtmad[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm3) + /// </summary> + public static unsafe Vector<float> TrigonometricMultiplyAddCoefficient(Vector<float> left, Vector<float> right, [ConstantExpected] byte control) => TrigonometricMultiplyAddCoefficient(left, right, control); + + /// <summary> + /// svfloat64_t svtmad[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm3) + /// </summary> + public static unsafe Vector<double> TrigonometricMultiplyAddCoefficient(Vector<double> left, Vector<double> right, [ConstantExpected] byte control) => TrigonometricMultiplyAddCoefficient(left, right, control); + + + /// TrigonometricSelectCoefficient : Trigonometric select coefficient + + /// <summary> + /// svfloat32_t svtssel[_f32](svfloat32_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<float> TrigonometricSelectCoefficient(Vector<float> value, Vector<uint> selector) => TrigonometricSelectCoefficient(value, selector); + + /// <summary> + /// svfloat64_t svtssel[_f64](svfloat64_t op1, svuint64_t op2) + /// </summary> + public static unsafe Vector<double> TrigonometricSelectCoefficient(Vector<double> value, Vector<ulong> selector) => TrigonometricSelectCoefficient(value, selector); + + + /// TrigonometricStartingValue : Trigonometric starting value + + /// <summary> + /// svfloat32_t svtsmul[_f32](svfloat32_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<float> TrigonometricStartingValue(Vector<float> value, Vector<uint> sign) => TrigonometricStartingValue(value, sign); + + /// <summary> + /// svfloat64_t svtsmul[_f64](svfloat64_t op1, svuint64_t op2) + /// </summary> + public static unsafe Vector<double> TrigonometricStartingValue(Vector<double> value, Vector<ulong> sign) => TrigonometricStartingValue(value, sign);
/// UnzipEven : Concatenate even elements from two inputs + + /// <summary> + /// svint8_t svuzp1[_s8](svint8_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<sbyte> UnzipEven(Vector<sbyte> left, Vector<sbyte> right) => UnzipEven(left, right); + + /// <summary> + /// svint16_t svuzp1[_s16](svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<short> UnzipEven(Vector<short> left, Vector<short> right) => UnzipEven(left, right); + + /// <summary> + /// svint32_t svuzp1[_s32](svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<int> UnzipEven(Vector<int> left, Vector<int> right) => UnzipEven(left, right); + + /// <summary> + /// svint64_t svuzp1[_s64](svint64_t op1, svint64_t op2) + /// </summary> + public static unsafe Vector<long> UnzipEven(Vector<long> left, Vector<long> right) => UnzipEven(left, right); + + /// <summary> + /// svuint8_t svuzp1[_u8](svuint8_t op1, svuint8_t op2) + /// svbool_t svuzp1_b8(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<byte> UnzipEven(Vector<byte> left, Vector<byte> right) => UnzipEven(left, right); + + /// <summary> + /// svuint16_t svuzp1[_u16](svuint16_t op1, svuint16_t op2) + /// svbool_t svuzp1_b16(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<ushort> UnzipEven(Vector<ushort> left, Vector<ushort> right) => UnzipEven(left, right); + + /// <summary> + /// svuint32_t svuzp1[_u32](svuint32_t op1, svuint32_t op2) + /// svbool_t svuzp1_b32(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<uint> UnzipEven(Vector<uint> left, Vector<uint> right) => UnzipEven(left, right); + + /// <summary> + /// svuint64_t svuzp1[_u64](svuint64_t op1, svuint64_t op2) + /// svbool_t svuzp1_b64(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<ulong> UnzipEven(Vector<ulong> left, Vector<ulong> right) => UnzipEven(left, right); + + /// <summary> + /// svfloat32_t svuzp1[_f32](svfloat32_t op1, svfloat32_t op2) + /// </summary> + public static unsafe Vector<float> UnzipEven(Vector<float> left, Vector<float> right) => UnzipEven(left, right); + + /// <summary> + /// svfloat64_t svuzp1[_f64](svfloat64_t op1, svfloat64_t op2) + /// </summary> + public static unsafe Vector<double> UnzipEven(Vector<double> left, Vector<double> right) => UnzipEven(left, right); + + + /// UnzipOdd : Concatenate odd elements from two inputs + + /// <summary> + /// svint8_t svuzp2[_s8](svint8_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<sbyte> UnzipOdd(Vector<sbyte> left, Vector<sbyte> right) => UnzipOdd(left, right); + + /// <summary> + /// svint16_t svuzp2[_s16](svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<short> UnzipOdd(Vector<short> left, Vector<short> right) => UnzipOdd(left, right); + + /// <summary> + /// svint32_t svuzp2[_s32](svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<int> UnzipOdd(Vector<int> left, Vector<int> right) => UnzipOdd(left, right); + + /// <summary> + /// svint64_t svuzp2[_s64](svint64_t op1, svint64_t op2) + /// </summary> + public static unsafe Vector<long> UnzipOdd(Vector<long> left, Vector<long> right) => UnzipOdd(left, right); + + /// <summary> + /// svuint8_t svuzp2[_u8](svuint8_t op1, svuint8_t op2) + /// svbool_t svuzp2_b8(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<byte> UnzipOdd(Vector<byte> left, Vector<byte> right) => UnzipOdd(left, right); + + /// <summary> + /// svuint16_t svuzp2[_u16](svuint16_t op1, svuint16_t op2) + /// svbool_t svuzp2_b16(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<ushort> UnzipOdd(Vector<ushort> left, Vector<ushort> right) => UnzipOdd(left, right); + + /// <summary> + /// svuint32_t svuzp2[_u32](svuint32_t op1, svuint32_t op2) + /// svbool_t svuzp2_b32(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<uint> UnzipOdd(Vector<uint> left, Vector<uint> right) => UnzipOdd(left, right); + + /// <summary> + /// svuint64_t svuzp2[_u64](svuint64_t op1, svuint64_t op2) + /// svbool_t svuzp2_b64(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<ulong> UnzipOdd(Vector<ulong> left, Vector<ulong> right) => UnzipOdd(left, right);
/// <summary> + /// svfloat32_t svuzp2[_f32](svfloat32_t op1, svfloat32_t op2) + /// </summary> + public static unsafe Vector<float> UnzipOdd(Vector<float> left, Vector<float> right) => UnzipOdd(left, right); + + /// <summary> + /// svfloat64_t svuzp2[_f64](svfloat64_t op1, svfloat64_t op2) + /// </summary> + public static unsafe Vector<double> UnzipOdd(Vector<double> left, Vector<double> right) => UnzipOdd(left, right); + + + /// VectorTableLookup : Table lookup in single-vector table + + /// <summary> + /// svint8_t svtbl[_s8](svint8_t data, svuint8_t indices) + /// </summary> + public static unsafe Vector<sbyte> VectorTableLookup(Vector<sbyte> data, Vector<byte> indices) => VectorTableLookup(data, indices); + + /// <summary> + /// svint16_t svtbl[_s16](svint16_t data, svuint16_t indices) + /// </summary> + public static unsafe Vector<short> VectorTableLookup(Vector<short> data, Vector<ushort> indices) => VectorTableLookup(data, indices); + + /// <summary> + /// svint32_t svtbl[_s32](svint32_t data, svuint32_t indices) + /// </summary> + public static unsafe Vector<int> VectorTableLookup(Vector<int> data, Vector<uint> indices) => VectorTableLookup(data, indices); + + /// <summary> + /// svint64_t svtbl[_s64](svint64_t data, svuint64_t indices) + /// </summary> + public static unsafe Vector<long> VectorTableLookup(Vector<long> data, Vector<ulong> indices) => VectorTableLookup(data, indices); + + /// <summary> + /// svuint8_t svtbl[_u8](svuint8_t data, svuint8_t indices) + /// </summary> + public static unsafe Vector<byte> VectorTableLookup(Vector<byte> data, Vector<byte> indices) => VectorTableLookup(data, indices); + + /// <summary> + /// svuint16_t svtbl[_u16](svuint16_t data, svuint16_t indices) + /// </summary> + public static unsafe Vector<ushort> VectorTableLookup(Vector<ushort> data, Vector<ushort> indices) => VectorTableLookup(data, indices); + + /// <summary> + /// svuint32_t svtbl[_u32](svuint32_t data, svuint32_t indices) + /// </summary> + public static unsafe Vector<uint> VectorTableLookup(Vector<uint> data, Vector<uint> indices) => VectorTableLookup(data, indices); + + /// <summary> + /// svuint64_t svtbl[_u64](svuint64_t data, svuint64_t indices) + /// </summary> + public static unsafe Vector<ulong> VectorTableLookup(Vector<ulong> data, Vector<ulong> indices) => VectorTableLookup(data, indices); + + /// <summary> + /// svfloat32_t svtbl[_f32](svfloat32_t data, svuint32_t indices) + /// </summary> + public static unsafe Vector<float> VectorTableLookup(Vector<float> data, Vector<uint> indices) => VectorTableLookup(data, indices); + + /// <summary> + /// svfloat64_t svtbl[_f64](svfloat64_t data, svuint64_t indices) + /// </summary> + public static unsafe Vector<double> VectorTableLookup(Vector<double> data, Vector<ulong> indices) => VectorTableLookup(data, indices);
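svtbl is a permute: output lane i receives data[indices[i]], and an out-of-range index yields zero in that lane. A scalar model over plain arrays (editor's reading of the semantics, not generated code):

    // Scalar model of VectorTableLookup (svtbl); indices outside the table produce 0.
    static int[] TableLookupModel(int[] data, uint[] indices)
    {
        var result = new int[indices.Length];
        for (int i = 0; i < indices.Length; i++)
            result[i] = indices[i] < (uint)data.Length ? data[(int)indices[i]] : 0;
        return result;
    }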
/// Xor : Bitwise exclusive OR + + /// <summary> + /// svint8_t sveor[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t sveor[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t sveor[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<sbyte> Xor(Vector<sbyte> left, Vector<sbyte> right) => Xor(left, right); + + /// <summary> + /// svint16_t sveor[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t sveor[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t sveor[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<short> Xor(Vector<short> left, Vector<short> right) => Xor(left, right); + + /// <summary> + /// svint32_t sveor[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t sveor[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t sveor[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<int> Xor(Vector<int> left, Vector<int> right) => Xor(left, right); + + /// <summary> + /// svint64_t sveor[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t sveor[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t sveor[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<long> Xor(Vector<long> left, Vector<long> right) => Xor(left, right); + + /// <summary> + /// svuint8_t sveor[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t sveor[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t sveor[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<byte> Xor(Vector<byte> left, Vector<byte> right) => Xor(left, right); + + /// <summary> + /// svuint16_t sveor[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t sveor[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t sveor[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<ushort> Xor(Vector<ushort> left, Vector<ushort> right) => Xor(left, right); + + /// <summary> + /// svuint32_t sveor[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t sveor[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t sveor[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<uint> Xor(Vector<uint> left, Vector<uint> right) => Xor(left, right); + + /// <summary> + /// svuint64_t sveor[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t sveor[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t sveor[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<ulong> Xor(Vector<ulong> left, Vector<ulong> right) => Xor(left, right); + + + /// XorAcross : Bitwise exclusive OR reduction to scalar + + /// <summary> + /// int8_t sveorv[_s8](svbool_t pg, svint8_t op) + /// </summary> + public static unsafe Vector<sbyte> XorAcross(Vector<sbyte> value) => XorAcross(value); + + /// <summary> + /// int16_t sveorv[_s16](svbool_t pg, svint16_t op) + /// </summary> + public static unsafe Vector<short> XorAcross(Vector<short> value) => XorAcross(value); + + /// <summary> + /// int32_t sveorv[_s32](svbool_t pg, svint32_t op) + /// </summary> + public static unsafe Vector<int> XorAcross(Vector<int> value) => XorAcross(value); + + /// <summary> + /// int64_t sveorv[_s64](svbool_t pg, svint64_t op) + /// </summary> + public static unsafe Vector<long> XorAcross(Vector<long> value) => XorAcross(value); + + /// <summary> + /// uint8_t sveorv[_u8](svbool_t pg, svuint8_t op) + /// </summary> + public static unsafe Vector<byte> XorAcross(Vector<byte> value) => XorAcross(value); + + /// <summary> + /// uint16_t sveorv[_u16](svbool_t pg, svuint16_t op) + /// </summary> + public static unsafe Vector<ushort> XorAcross(Vector<ushort> value) => XorAcross(value); + + /// <summary> + /// uint32_t sveorv[_u32](svbool_t pg, svuint32_t op) + /// </summary> + public static unsafe Vector<uint> XorAcross(Vector<uint> value) => XorAcross(value); + + /// <summary> + /// uint64_t sveorv[_u64](svbool_t pg, svuint64_t op) + /// </summary> + public static unsafe Vector<ulong> XorAcross(Vector<ulong> value) => XorAcross(value);
/// ZeroExtend16 : Zero-extend the low 16 bits + + /// <summary> + /// svuint32_t svexth[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// svuint32_t svexth[_u32]_x(svbool_t pg, svuint32_t op) + /// svuint32_t svexth[_u32]_z(svbool_t pg, svuint32_t op) + /// </summary> + public static unsafe Vector<uint> ZeroExtend16(Vector<uint> value) => ZeroExtend16(value); + + /// <summary> + /// svuint64_t svexth[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// svuint64_t svexth[_u64]_x(svbool_t pg, svuint64_t op) + /// svuint64_t svexth[_u64]_z(svbool_t pg, svuint64_t op) + /// </summary> + public static unsafe Vector<ulong> ZeroExtend16(Vector<ulong> value) => ZeroExtend16(value); + + + /// ZeroExtend32 : Zero-extend the low 32 bits + + /// <summary> + /// svuint64_t svextw[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// svuint64_t svextw[_u64]_x(svbool_t pg, svuint64_t op) + /// svuint64_t svextw[_u64]_z(svbool_t pg, svuint64_t op) + /// </summary> + public static unsafe Vector<ulong> ZeroExtend32(Vector<ulong> value) => ZeroExtend32(value); + + + /// ZeroExtend8 : Zero-extend the low 8 bits + + /// <summary> + /// svuint16_t svextb[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) + /// svuint16_t svextb[_u16]_x(svbool_t pg, svuint16_t op) + /// svuint16_t svextb[_u16]_z(svbool_t pg, svuint16_t op) + /// </summary> + public static unsafe Vector<ushort> ZeroExtend8(Vector<ushort> value) => ZeroExtend8(value); + + /// <summary> + /// svuint32_t svextb[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// svuint32_t svextb[_u32]_x(svbool_t pg, svuint32_t op) + /// svuint32_t svextb[_u32]_z(svbool_t pg, svuint32_t op) + /// </summary> + public static unsafe Vector<uint> ZeroExtend8(Vector<uint> value) => ZeroExtend8(value); + + /// <summary> + /// svuint64_t svextb[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// svuint64_t svextb[_u64]_x(svbool_t pg, svuint64_t op) + /// svuint64_t svextb[_u64]_z(svbool_t pg, svuint64_t op) + /// </summary> + public static unsafe Vector<ulong> ZeroExtend8(Vector<ulong> value) => ZeroExtend8(value); + + + /// ZeroExtendWideningLower : Unpack and extend low half + + /// <summary> + /// svuint16_t svunpklo[_u16](svuint8_t op) + /// svbool_t svunpklo[_b](svbool_t op) + /// </summary> + public static unsafe Vector<ushort> ZeroExtendWideningLower(Vector<byte> value) => ZeroExtendWideningLower(value); + + /// <summary> + /// svuint32_t svunpklo[_u32](svuint16_t op) + /// svbool_t svunpklo[_b](svbool_t op) + /// </summary> + public static unsafe Vector<uint> ZeroExtendWideningLower(Vector<ushort> value) => ZeroExtendWideningLower(value); + + /// <summary> + /// svuint64_t svunpklo[_u64](svuint32_t op) + /// svbool_t svunpklo[_b](svbool_t op) + /// </summary> + public static unsafe Vector<ulong> ZeroExtendWideningLower(Vector<uint> value) => ZeroExtendWideningLower(value); + + + /// ZeroExtendWideningUpper : Unpack and extend high half + + /// <summary> + /// svuint16_t svunpkhi[_u16](svuint8_t op) + /// svbool_t svunpkhi[_b](svbool_t op) + /// </summary> + public static unsafe Vector<ushort> ZeroExtendWideningUpper(Vector<byte> value) => ZeroExtendWideningUpper(value); + + /// <summary> + /// svuint32_t svunpkhi[_u32](svuint16_t op) + /// svbool_t svunpkhi[_b](svbool_t op) + /// </summary> + public static unsafe Vector<uint> ZeroExtendWideningUpper(Vector<ushort> value) => ZeroExtendWideningUpper(value); + + /// <summary> + /// svuint64_t svunpkhi[_u64](svuint32_t op) + /// svbool_t svunpkhi[_b](svbool_t op) + /// </summary> + public static unsafe Vector<ulong> ZeroExtendWideningUpper(Vector<uint> value) => ZeroExtendWideningUpper(value);
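svunpklo/svunpkhi take the low or high half of the input lanes and widen each element to double width with zero fill. A scalar model of the byte-to-ushort pair (editor's sketch):

    // Scalar model of ZeroExtendWideningLower/Upper (svunpklo/svunpkhi) for byte lanes.
    static (ushort[] Lower, ushort[] Upper) ZeroExtendWideningModel(byte[] source)
    {
        int half = source.Length / 2;
        var lower = new ushort[half];
        var upper = new ushort[half];
        for (int i = 0; i < half; i++)
        {
            lower[i] = source[i];        // low half, zero-extended
            upper[i] = source[half + i]; // high half, zero-extended
        }
        return (lower, upper);
    }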
/// ZipHigh : Interleave elements from high halves of two inputs + + /// <summary> + /// svint8_t svzip2[_s8](svint8_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<sbyte> ZipHigh(Vector<sbyte> left, Vector<sbyte> right) => ZipHigh(left, right); + + /// <summary> + /// svint16_t svzip2[_s16](svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<short> ZipHigh(Vector<short> left, Vector<short> right) => ZipHigh(left, right); + + /// <summary> + /// svint32_t svzip2[_s32](svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<int> ZipHigh(Vector<int> left, Vector<int> right) => ZipHigh(left, right); + + /// <summary> + /// svint64_t svzip2[_s64](svint64_t op1, svint64_t op2) + /// </summary> + public static unsafe Vector<long> ZipHigh(Vector<long> left, Vector<long> right) => ZipHigh(left, right); + + /// <summary> + /// svuint8_t svzip2[_u8](svuint8_t op1, svuint8_t op2) + /// svbool_t svzip2_b8(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<byte> ZipHigh(Vector<byte> left, Vector<byte> right) => ZipHigh(left, right); + + /// <summary> + /// svuint16_t svzip2[_u16](svuint16_t op1, svuint16_t op2) + /// svbool_t svzip2_b16(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<ushort> ZipHigh(Vector<ushort> left, Vector<ushort> right) => ZipHigh(left, right); + + /// <summary> + /// svuint32_t svzip2[_u32](svuint32_t op1, svuint32_t op2) + /// svbool_t svzip2_b32(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<uint> ZipHigh(Vector<uint> left, Vector<uint> right) => ZipHigh(left, right); + + /// <summary> + /// svuint64_t svzip2[_u64](svuint64_t op1, svuint64_t op2) + /// svbool_t svzip2_b64(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<ulong> ZipHigh(Vector<ulong> left, Vector<ulong> right) => ZipHigh(left, right); + + /// <summary> + /// svfloat32_t svzip2[_f32](svfloat32_t op1, svfloat32_t op2) + /// </summary> + public static unsafe Vector<float> ZipHigh(Vector<float> left, Vector<float> right) => ZipHigh(left, right); + + /// <summary> + /// svfloat64_t svzip2[_f64](svfloat64_t op1, svfloat64_t op2) + /// </summary> + public static unsafe Vector<double> ZipHigh(Vector<double> left, Vector<double> right) => ZipHigh(left, right); + + + /// ZipLow : Interleave elements from low halves of two inputs + + /// <summary> + /// svint8_t svzip1[_s8](svint8_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<sbyte> ZipLow(Vector<sbyte> left, Vector<sbyte> right) => ZipLow(left, right); + + /// <summary> + /// svint16_t svzip1[_s16](svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<short> ZipLow(Vector<short> left, Vector<short> right) => ZipLow(left, right); + + /// <summary> + /// svint32_t svzip1[_s32](svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<int> ZipLow(Vector<int> left, Vector<int> right) => ZipLow(left, right); + + /// <summary> + /// svint64_t svzip1[_s64](svint64_t op1, svint64_t op2) + /// </summary> + public static unsafe Vector<long> ZipLow(Vector<long> left, Vector<long> right) => ZipLow(left, right); + + /// <summary> + /// svuint8_t svzip1[_u8](svuint8_t op1, svuint8_t op2) + /// svbool_t svzip1_b8(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<byte> ZipLow(Vector<byte> left, Vector<byte> right) => ZipLow(left, right); + + /// <summary> + /// svuint16_t svzip1[_u16](svuint16_t op1, svuint16_t op2) + /// svbool_t svzip1_b16(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<ushort> ZipLow(Vector<ushort> left, Vector<ushort> right) => ZipLow(left, right); + + /// <summary> + /// svuint32_t svzip1[_u32](svuint32_t op1, svuint32_t op2) + /// svbool_t svzip1_b32(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<uint> ZipLow(Vector<uint> left, Vector<uint> right) => ZipLow(left, right); + + /// <summary> + /// svuint64_t svzip1[_u64](svuint64_t op1, svuint64_t op2) + /// svbool_t svzip1_b64(svbool_t op1, svbool_t op2) + /// </summary> + public static unsafe Vector<ulong> ZipLow(Vector<ulong> left, Vector<ulong> right) => ZipLow(left, right); + + /// <summary> + /// svfloat32_t svzip1[_f32](svfloat32_t op1, svfloat32_t op2) + /// </summary> + public static unsafe Vector<float> ZipLow(Vector<float> left, Vector<float> right) => ZipLow(left, right); + + /// <summary> + /// svfloat64_t svzip1[_f64](svfloat64_t op1, svfloat64_t op2) + /// </summary> + public static unsafe Vector<double> ZipLow(Vector<double> left, Vector<double> right) => ZipLow(left, right); + + } +}
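The permute group that closes the class pairs up: ZipLow/ZipHigh interleave lanes from the low/high halves of two inputs, and UnzipEven/UnzipOdd undo that interleaving, so a zip followed by the matching unzips round-trips. A scalar model of the zip step (editor's sketch of svzip1/svzip2):

    // Scalar model of ZipLow (high: false) and ZipHigh (high: true).
    static int[] ZipModel(int[] left, int[] right, bool high)
    {
        int half = left.Length / 2;
        int offset = high ? half : 0;
        var result = new int[left.Length];
        for (int i = 0; i < half; i++)
        {
            result[2 * i]     = left[offset + i];
            result[2 * i + 1] = right[offset + i];
        }
        return result;
    }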
diff --git a/sve_api/out_cs_api/Sve2.PlatformNotSupported.cs b/sve_api/out_cs_api/Sve2.PlatformNotSupported.cs + new file mode 100644 + index 0000000000000..36ec5bdcf8d88 + --- /dev/null + +++ b/sve_api/out_cs_api/Sve2.PlatformNotSupported.cs + @@ -0,0 +1,5442 @@ + +// Licensed to the .NET Foundation under one or more agreements. + +// The .NET Foundation licenses this file to you under the MIT license. + + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// <summary> + /// This class provides access to the ARM SVE2 hardware instructions via intrinsics + /// </summary> + [Intrinsic] + [CLSCompliant(false)] + public abstract class Sve2 : AdvSimd + { + internal Sve2() { } + + public static new bool IsSupported { get => false; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => false; } + }
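Every method body in this PlatformNotSupported variant throws, so it must only be reachable behind a capability check; when Sve2.IsSupported is a JIT-time constant the dead branch is dropped. A typical guard, using AbsoluteDifferenceAdd from the section below (the portable fallback is an editor's sketch and ignores wraparound at the extremes of the range):

    // Illustrative guard; the throwing stubs are never reached on unsupported hardware.
    static Vector<int> AbaOrFallback(Vector<int> acc, Vector<int> a, Vector<int> b)
    {
        if (Sve2.IsSupported)
            return Sve2.AbsoluteDifferenceAdd(acc, a, b); // svaba
        return acc + Vector.Abs(a - b);                   // portable approximation
    }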
/// AbsoluteDifferenceAdd : Absolute difference and accumulate + + /// <summary> + /// svint8_t svaba[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// </summary> + public static unsafe Vector<sbyte> AbsoluteDifferenceAdd(Vector<sbyte> addend, Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint16_t svaba[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// </summary> + public static unsafe Vector<short> AbsoluteDifferenceAdd(Vector<short> addend, Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svaba[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// </summary> + public static unsafe Vector<int> AbsoluteDifferenceAdd(Vector<int> addend, Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svaba[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// </summary> + public static unsafe Vector<long> AbsoluteDifferenceAdd(Vector<long> addend, Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint8_t svaba[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// </summary> + public static unsafe Vector<byte> AbsoluteDifferenceAdd(Vector<byte> addend, Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svaba[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// </summary> + public static unsafe Vector<ushort> AbsoluteDifferenceAdd(Vector<ushort> addend, Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svaba[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// </summary> + public static unsafe Vector<uint> AbsoluteDifferenceAdd(Vector<uint> addend, Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svaba[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// </summary> + public static unsafe Vector<ulong> AbsoluteDifferenceAdd(Vector<ulong> addend, Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); } + + + /// AbsoluteDifferenceAddWideningLower : Absolute difference and accumulate long (bottom) + + /// <summary> + /// svint16_t svabalb[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// </summary> + public static unsafe Vector<short> AbsoluteDifferenceAddWideningLower(Vector<short> addend, Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svabalb[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// </summary> + public static unsafe Vector<int> AbsoluteDifferenceAddWideningLower(Vector<int> addend, Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svabalb[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// </summary> + public static unsafe Vector<long> AbsoluteDifferenceAddWideningLower(Vector<long> addend, Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svabalb[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) + /// </summary> + public static unsafe Vector<ushort> AbsoluteDifferenceAddWideningLower(Vector<ushort> addend, Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svabalb[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) + /// </summary> + public static unsafe Vector<uint> AbsoluteDifferenceAddWideningLower(Vector<uint> addend, Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svabalb[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) + /// </summary> + public static unsafe Vector<ulong> AbsoluteDifferenceAddWideningLower(Vector<ulong> addend, Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); } + + + /// AbsoluteDifferenceAddWideningUpper : Absolute difference and accumulate long (top) + + /// <summary> + /// svint16_t svabalt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// </summary> + public static unsafe Vector<short> AbsoluteDifferenceAddWideningUpper(Vector<short> addend, Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svabalt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// </summary> + public static unsafe Vector<int> AbsoluteDifferenceAddWideningUpper(Vector<int> addend, Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svabalt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// </summary> + public static unsafe Vector<long> AbsoluteDifferenceAddWideningUpper(Vector<long> addend, Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svabalt[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) + /// </summary> + public static unsafe Vector<ushort> AbsoluteDifferenceAddWideningUpper(Vector<ushort> addend, Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svabalt[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) + /// </summary> + public static unsafe Vector<uint> AbsoluteDifferenceAddWideningUpper(Vector<uint> addend, Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svabalt[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) + /// </summary> + public static unsafe Vector<ulong> AbsoluteDifferenceAddWideningUpper(Vector<ulong> addend, Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
/// AbsoluteDifferenceWideningLower : Absolute difference long (bottom) + + /// <summary> + /// svint16_t svabdlb[_s16](svint8_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<short> AbsoluteDifferenceWideningLower(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svabdlb[_s32](svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<int> AbsoluteDifferenceWideningLower(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svabdlb[_s64](svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<long> AbsoluteDifferenceWideningLower(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svabdlb[_u16](svuint8_t op1, svuint8_t op2) + /// </summary> + public static unsafe Vector<ushort> AbsoluteDifferenceWideningLower(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svabdlb[_u32](svuint16_t op1, svuint16_t op2) + /// </summary> + public static unsafe Vector<uint> AbsoluteDifferenceWideningLower(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svabdlb[_u64](svuint32_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<ulong> AbsoluteDifferenceWideningLower(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); } + + + /// AbsoluteDifferenceWideningUpper : Absolute difference long (top) + + /// <summary> + /// svint16_t svabdlt[_s16](svint8_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<short> AbsoluteDifferenceWideningUpper(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svabdlt[_s32](svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<int> AbsoluteDifferenceWideningUpper(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svabdlt[_s64](svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<long> AbsoluteDifferenceWideningUpper(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svabdlt[_u16](svuint8_t op1, svuint8_t op2) + /// </summary> + public static unsafe Vector<ushort> AbsoluteDifferenceWideningUpper(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svabdlt[_u32](svuint16_t op1, svuint16_t op2) + /// </summary> + public static unsafe Vector<uint> AbsoluteDifferenceWideningUpper(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svabdlt[_u64](svuint32_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<ulong> AbsoluteDifferenceWideningUpper(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); } + + + /// AddCarryWideningLower : Add with carry long (bottom) + + /// <summary> + /// svuint32_t svadclb[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// </summary> + public static unsafe Vector<uint> AddCarryWideningLower(Vector<uint> op1, Vector<uint> op2, Vector<uint> op3) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svadclb[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// </summary> + public static unsafe Vector<ulong> AddCarryWideningLower(Vector<ulong> op1, Vector<ulong> op2, Vector<ulong> op3) { throw new PlatformNotSupportedException(); } + + + /// AddCarryWideningUpper : Add with carry long (top) + + /// <summary> + /// svuint32_t svadclt[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// </summary> + public static unsafe Vector<uint> AddCarryWideningUpper(Vector<uint> op1, Vector<uint> op2, Vector<uint> op3) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svadclt[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// </summary> + public static unsafe Vector<ulong> AddCarryWideningUpper(Vector<ulong> op1, Vector<ulong> op2, Vector<ulong> op3) { throw new PlatformNotSupportedException(); } + + + /// AddHighNarrowingLower : Add narrow high part (bottom) + + /// <summary> + /// svint8_t svaddhnb[_s16](svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<sbyte> AddHighNarrowingLower(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint16_t svaddhnb[_s32](svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<short> AddHighNarrowingLower(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svaddhnb[_s64](svint64_t op1, svint64_t op2) + /// </summary> + public static unsafe Vector<int> AddHighNarrowingLower(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint8_t svaddhnb[_u16](svuint16_t op1, svuint16_t op2) + /// </summary> + public static unsafe Vector<byte> AddHighNarrowingLower(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svaddhnb[_u32](svuint32_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<ushort> AddHighNarrowingLower(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svaddhnb[_u64](svuint64_t op1, svuint64_t op2) + /// </summary> + public static unsafe Vector<uint> AddHighNarrowingLower(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
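svaddhnb adds two wide vectors and keeps the high half of each sum as a narrowed element. A scalar model of the 16-to-8-bit bottom form (editor's sketch; it packs results densely and ignores the even/odd destination-lane placement that distinguishes the bottom and top forms):

    // Scalar model of AddHighNarrowingLower (svaddhnb[_s16]).
    static sbyte[] AddHighNarrowingModel(short[] left, short[] right)
    {
        var result = new sbyte[left.Length];
        for (int i = 0; i < left.Length; i++)
            result[i] = (sbyte)((left[i] + right[i]) >> 8); // keep high 8 bits of the sum
        return result;
    }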
/// AddHighNarrowingUpper : Add narrow high part (top) + + /// <summary> + /// svint8_t svaddhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<sbyte> AddHighNarrowingUpper(Vector<sbyte> even, Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint16_t svaddhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<short> AddHighNarrowingUpper(Vector<short> even, Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svaddhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2) + /// </summary> + public static unsafe Vector<int> AddHighNarrowingUpper(Vector<int> even, Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint8_t svaddhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2) + /// </summary> + public static unsafe Vector<byte> AddHighNarrowingUpper(Vector<byte> even, Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svaddhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<ushort> AddHighNarrowingUpper(Vector<ushort> even, Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svaddhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2) + /// </summary> + public static unsafe Vector<uint> AddHighNarrowingUpper(Vector<uint> even, Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); } + + + /// AddPairwise : Add pairwise + + /// <summary> + /// svint8_t svaddp[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svaddp[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<sbyte> AddPairwise(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint16_t svaddp[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svaddp[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<short> AddPairwise(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svaddp[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svaddp[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<int> AddPairwise(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svaddp[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svaddp[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// </summary> + public static unsafe Vector<long> AddPairwise(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint8_t svaddp[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svaddp[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// </summary> + public static unsafe Vector<byte> AddPairwise(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svaddp[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svaddp[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// </summary> + public static unsafe Vector<ushort> AddPairwise(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svaddp[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svaddp[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<uint> AddPairwise(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svaddp[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svaddp[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// </summary> + public static unsafe Vector<ulong> AddPairwise(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
/// <summary> + /// svfloat32_t svaddp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svaddp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// </summary> + public static unsafe Vector<float> AddPairwise(Vector<float> left, Vector<float> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svfloat64_t svaddp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svaddp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// </summary> + public static unsafe Vector<double> AddPairwise(Vector<double> left, Vector<double> right) { throw new PlatformNotSupportedException(); } + + + /// AddPairwiseWidening : Add and accumulate long pairwise + + /// <summary> + /// svint16_t svadalp[_s16]_m(svbool_t pg, svint16_t op1, svint8_t op2) + /// svint16_t svadalp[_s16]_x(svbool_t pg, svint16_t op1, svint8_t op2) + /// svint16_t svadalp[_s16]_z(svbool_t pg, svint16_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<short> AddPairwiseWidening(Vector<short> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svadalp[_s32]_m(svbool_t pg, svint32_t op1, svint16_t op2) + /// svint32_t svadalp[_s32]_x(svbool_t pg, svint32_t op1, svint16_t op2) + /// svint32_t svadalp[_s32]_z(svbool_t pg, svint32_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<int> AddPairwiseWidening(Vector<int> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svadalp[_s64]_m(svbool_t pg, svint64_t op1, svint32_t op2) + /// svint64_t svadalp[_s64]_x(svbool_t pg, svint64_t op1, svint32_t op2) + /// svint64_t svadalp[_s64]_z(svbool_t pg, svint64_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<long> AddPairwiseWidening(Vector<long> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svadalp[_u16]_m(svbool_t pg, svuint16_t op1, svuint8_t op2) + /// svuint16_t svadalp[_u16]_x(svbool_t pg, svuint16_t op1, svuint8_t op2) + /// svuint16_t svadalp[_u16]_z(svbool_t pg, svuint16_t op1, svuint8_t op2) + /// </summary> + public static unsafe Vector<ushort> AddPairwiseWidening(Vector<ushort> left, Vector<byte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svadalp[_u32]_m(svbool_t pg, svuint32_t op1, svuint16_t op2) + /// svuint32_t svadalp[_u32]_x(svbool_t pg, svuint32_t op1, svuint16_t op2) + /// svuint32_t svadalp[_u32]_z(svbool_t pg, svuint32_t op1, svuint16_t op2) + /// </summary> + public static unsafe Vector<uint> AddPairwiseWidening(Vector<uint> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svadalp[_u64]_m(svbool_t pg, svuint64_t op1, svuint32_t op2) + /// svuint64_t svadalp[_u64]_x(svbool_t pg, svuint64_t op1, svuint32_t op2) + /// svuint64_t svadalp[_u64]_z(svbool_t pg, svuint64_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<ulong> AddPairwiseWidening(Vector<ulong> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
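svadalp sums each adjacent pair of narrow elements and accumulates the pair-sum into the corresponding wide lane of the first operand. A scalar model of the sbyte-to-short form (editor's sketch):

    // Scalar model of AddPairwiseWidening (svadalp[_s16]).
    static short[] AddPairwiseWideningModel(short[] accumulator, sbyte[] pairs)
    {
        var result = (short[])accumulator.Clone();
        for (int i = 0; i < result.Length; i++)
            result[i] += (short)(pairs[2 * i] + pairs[2 * i + 1]);
        return result;
    }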
/// AddRotateComplex : Complex add with rotate + + /// <summary> + /// svint8_t svcadd[_s8](svint8_t op1, svint8_t op2, uint64_t imm_rotation) + /// </summary> + public static unsafe Vector<sbyte> AddRotateComplex(Vector<sbyte> left, Vector<sbyte> right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint16_t svcadd[_s16](svint16_t op1, svint16_t op2, uint64_t imm_rotation) + /// </summary> + public static unsafe Vector<short> AddRotateComplex(Vector<short> left, Vector<short> right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svcadd[_s32](svint32_t op1, svint32_t op2, uint64_t imm_rotation) + /// </summary> + public static unsafe Vector<int> AddRotateComplex(Vector<int> left, Vector<int> right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svcadd[_s64](svint64_t op1, svint64_t op2, uint64_t imm_rotation) + /// </summary> + public static unsafe Vector<long> AddRotateComplex(Vector<long> left, Vector<long> right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint8_t svcadd[_u8](svuint8_t op1, svuint8_t op2, uint64_t imm_rotation) + /// </summary> + public static unsafe Vector<byte> AddRotateComplex(Vector<byte> left, Vector<byte> right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svcadd[_u16](svuint16_t op1, svuint16_t op2, uint64_t imm_rotation) + /// </summary> + public static unsafe Vector<ushort> AddRotateComplex(Vector<ushort> left, Vector<ushort> right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svcadd[_u32](svuint32_t op1, svuint32_t op2, uint64_t imm_rotation) + /// </summary> + public static unsafe Vector<uint> AddRotateComplex(Vector<uint> left, Vector<uint> right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svcadd[_u64](svuint64_t op1, svuint64_t op2, uint64_t imm_rotation) + /// </summary> + public static unsafe Vector<ulong> AddRotateComplex(Vector<ulong> left, Vector<ulong> right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + + /// AddSaturate : Saturating add + + /// <summary> + /// svint8_t svqadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<sbyte> AddSaturate(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint16_t svqadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<short> AddSaturate(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svqadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<int> AddSaturate(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svqadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// </summary> + public static unsafe Vector<long> AddSaturate(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint8_t svqadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svqadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svqadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// </summary> + public static unsafe Vector<byte> AddSaturate(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svqadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svqadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svqadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// </summary> + public static unsafe Vector<ushort> AddSaturate(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svqadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svqadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svqadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<uint> AddSaturate(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svqadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svqadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svqadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// </summary> + public static unsafe Vector<ulong> AddSaturate(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
/// AddSaturateWithSignedAddend : Saturating add with signed addend + + /// <summary> + /// svuint8_t svsqadd[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svsqadd[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svsqadd[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<byte> AddSaturateWithSignedAddend(Vector<byte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svsqadd[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svsqadd[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svsqadd[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<ushort> AddSaturateWithSignedAddend(Vector<ushort> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svsqadd[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svsqadd[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svsqadd[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<uint> AddSaturateWithSignedAddend(Vector<uint> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svsqadd[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svsqadd[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svsqadd[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) + /// </summary> + public static unsafe Vector<ulong> AddSaturateWithSignedAddend(Vector<ulong> left, Vector<long> right) { throw new PlatformNotSupportedException(); } + + + /// AddSaturateWithUnsignedAddend : Saturating add with unsigned addend + + /// <summary> + /// svint8_t svuqadd[_s8]_m(svbool_t pg, svint8_t op1, svuint8_t op2) + /// svint8_t svuqadd[_s8]_x(svbool_t pg, svint8_t op1, svuint8_t op2) + /// svint8_t svuqadd[_s8]_z(svbool_t pg, svint8_t op1, svuint8_t op2) + /// </summary> + public static unsafe Vector<sbyte> AddSaturateWithUnsignedAddend(Vector<sbyte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint16_t svuqadd[_s16]_m(svbool_t pg, svint16_t op1, svuint16_t op2) + /// svint16_t svuqadd[_s16]_x(svbool_t pg, svint16_t op1, svuint16_t op2) + /// svint16_t svuqadd[_s16]_z(svbool_t pg, svint16_t op1, svuint16_t op2) + /// </summary> + public static unsafe Vector<short> AddSaturateWithUnsignedAddend(Vector<short> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svuqadd[_s32]_m(svbool_t pg, svint32_t op1, svuint32_t op2) + /// svint32_t svuqadd[_s32]_x(svbool_t pg, svint32_t op1, svuint32_t op2) + /// svint32_t svuqadd[_s32]_z(svbool_t pg, svint32_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<int> AddSaturateWithUnsignedAddend(Vector<int> left, Vector<uint> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svuqadd[_s64]_m(svbool_t pg, svint64_t op1, svuint64_t op2) + /// svint64_t svuqadd[_s64]_x(svbool_t pg, svint64_t op1, svuint64_t op2) + /// svint64_t svuqadd[_s64]_z(svbool_t pg, svint64_t op1, svuint64_t op2) + /// </summary> + public static unsafe Vector<long> AddSaturateWithUnsignedAddend(Vector<long> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
/// AddWideLower : Add wide (bottom) + + /// <summary> + /// svint16_t svaddwb[_s16](svint16_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<short> AddWideLower(Vector<short> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svaddwb[_s32](svint32_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<int> AddWideLower(Vector<int> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svaddwb[_s64](svint64_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<long> AddWideLower(Vector<long> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svaddwb[_u16](svuint16_t op1, svuint8_t op2) + /// </summary> + public static unsafe Vector<ushort> AddWideLower(Vector<ushort> left, Vector<byte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svaddwb[_u32](svuint32_t op1, svuint16_t op2) + /// </summary> + public static unsafe Vector<uint> AddWideLower(Vector<uint> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svaddwb[_u64](svuint64_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<ulong> AddWideLower(Vector<ulong> left, Vector<uint> right) { throw new PlatformNotSupportedException(); } + + + /// AddWideUpper : Add wide (top) + + /// <summary> + /// svint16_t svaddwt[_s16](svint16_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<short> AddWideUpper(Vector<short> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svaddwt[_s32](svint32_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<int> AddWideUpper(Vector<int> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svaddwt[_s64](svint64_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<long> AddWideUpper(Vector<long> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svaddwt[_u16](svuint16_t op1, svuint8_t op2) + /// </summary> + public static unsafe Vector<ushort> AddWideUpper(Vector<ushort> left, Vector<byte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svaddwt[_u32](svuint32_t op1, svuint16_t op2) + /// </summary> + public static unsafe Vector<uint> AddWideUpper(Vector<uint> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svaddwt[_u64](svuint64_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<ulong> AddWideUpper(Vector<ulong> left, Vector<uint> right) { throw new PlatformNotSupportedException(); } + + + /// AddWideningLower : Add long (bottom) + + /// <summary> + /// svint16_t svaddlb[_s16](svint8_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<short> AddWideningLower(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svaddlb[_s32](svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<int> AddWideningLower(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svaddlb[_s64](svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<long> AddWideningLower(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svaddlb[_u16](svuint8_t op1, svuint8_t op2) + /// </summary> + public static unsafe Vector<ushort> AddWideningLower(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svaddlb[_u32](svuint16_t op1, svuint16_t op2) + /// </summary> + public static unsafe Vector<uint> AddWideningLower(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svaddlb[_u64](svuint32_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<ulong> AddWideningLower(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
/// AddWideningLowerUpper : Add long (bottom + top) + + /// <summary> + /// svint16_t svaddlbt[_s16](svint8_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<short> AddWideningLowerUpper(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svaddlbt[_s32](svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<int> AddWideningLowerUpper(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svaddlbt[_s64](svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<long> AddWideningLowerUpper(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + + /// AddWideningUpper : Add long (top) + + /// <summary> + /// svint16_t svaddlt[_s16](svint8_t op1, svint8_t op2) + /// </summary> + public static unsafe Vector<short> AddWideningUpper(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svaddlt[_s32](svint16_t op1, svint16_t op2) + /// </summary> + public static unsafe Vector<int> AddWideningUpper(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svaddlt[_s64](svint32_t op1, svint32_t op2) + /// </summary> + public static unsafe Vector<long> AddWideningUpper(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svaddlt[_u16](svuint8_t op1, svuint8_t op2) + /// </summary> + public static unsafe Vector<ushort> AddWideningUpper(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svaddlt[_u32](svuint16_t op1, svuint16_t op2) + /// </summary> + public static unsafe Vector<uint> AddWideningUpper(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svaddlt[_u64](svuint32_t op1, svuint32_t op2) + /// </summary> + public static unsafe Vector<ulong> AddWideningUpper(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); } + + + /// BitwiseClearXor : Bitwise clear and exclusive OR + + /// <summary> + /// svint8_t svbcax[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// </summary> + public static unsafe Vector<sbyte> BitwiseClearXor(Vector<sbyte> xor, Vector<sbyte> value, Vector<sbyte> mask) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint16_t svbcax[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// </summary> + public static unsafe Vector<short> BitwiseClearXor(Vector<short> xor, Vector<short> value, Vector<short> mask) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint32_t svbcax[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// </summary> + public static unsafe Vector<int> BitwiseClearXor(Vector<int> xor, Vector<int> value, Vector<int> mask) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svint64_t svbcax[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// </summary> + public static unsafe Vector<long> BitwiseClearXor(Vector<long> xor, Vector<long> value, Vector<long> mask) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint8_t svbcax[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// </summary> + public static unsafe Vector<byte> BitwiseClearXor(Vector<byte> xor, Vector<byte> value, Vector<byte> mask) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint16_t svbcax[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// </summary> + public static unsafe Vector<ushort> BitwiseClearXor(Vector<ushort> xor, Vector<ushort> value, Vector<ushort> mask) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint32_t svbcax[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// </summary> + public static unsafe Vector<uint> BitwiseClearXor(Vector<uint> xor, Vector<uint> value, Vector<uint> mask) { throw new PlatformNotSupportedException(); } + + /// <summary> + /// svuint64_t svbcax[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// </summary> + public static unsafe Vector<ulong> BitwiseClearXor(Vector<ulong> xor, Vector<ulong> value, Vector<ulong> mask) { throw new PlatformNotSupportedException(); }
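svbcax computes op1 ^ (op2 & ~op3), and svbsl (next section) assembles each result bit from op2 or op3 according to the corresponding bit of op1. Scalar models of both, following the parameter names used in these sections (editor's sketch):

    // Scalar models of BitwiseClearXor (svbcax) and BitwiseSelect (svbsl).
    static uint BitwiseClearXorModel(uint xor, uint value, uint mask)
        => xor ^ (value & ~mask);

    static uint BitwiseSelectModel(uint select, uint left, uint right)
        => (select & left) | (~select & right);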
svbcax[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svbcax[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svbcax[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) { throw new PlatformNotSupportedException(); } + + + /// BitwiseSelect : Bitwise select + + /// + /// svint8_t svbsl[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svbsl[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svbsl[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svbsl[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svbsl[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svbsl[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svbsl[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svbsl[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + + /// BitwiseSelectLeftInverted : Bitwise select with first input inverted + + /// + /// svint8_t svbsl1n[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svbsl1n[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svbsl1n[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svbsl1n[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svbsl1n[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// 
+ public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svbsl1n[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svbsl1n[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svbsl1n[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// BitwiseSelectRightInverted : Bitwise select with second input inverted + + /// + /// svint8_t svbsl2n[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svbsl2n[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svbsl2n[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svbsl2n[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svbsl2n[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svbsl2n[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svbsl2n[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svbsl2n[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// CountMatchingElements : Count matching elements + + /// + /// svuint32_t svhistcnt[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector CountMatchingElements(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svhistcnt[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector CountMatchingElements(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svhistcnt[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector CountMatchingElements(Vector mask, Vector left, Vector right) { throw new 
PlatformNotSupportedException(); } + + /// + /// svuint64_t svhistcnt[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector CountMatchingElements(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// CountMatchingElementsIn128BitSegments : Count matching elements in 128-bit segments + + /// + /// svuint8_t svhistseg[_s8](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector CountMatchingElementsIn128BitSegments(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svhistseg[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector CountMatchingElementsIn128BitSegments(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// CreateWhileGreaterThanMask : While decrementing scalar is greater than + + /// + /// svbool_t svwhilegt_b8[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilegt_b8[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilegt_b8[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilegt_b8[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilegt_b16[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilegt_b16[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilegt_b16[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilegt_b16[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilegt_b32[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilegt_b32[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilegt_b32[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilegt_b32[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilegt_b64[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t 
svwhilegt_b64[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilegt_b64[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilegt_b64[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right) { throw new PlatformNotSupportedException(); } + + + /// CreateWhileGreaterThanOrEqualMask : While decrementing scalar is greater than or equal to + + /// + /// svbool_t svwhilege_b8[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b8[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b8[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b8[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b16[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b16[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b16[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b16[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b32[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b32[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b32[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b32[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b64[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b64[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector 
CreateWhileGreaterThanOrEqualMask(long left, long right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b64[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilege_b64[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) { throw new PlatformNotSupportedException(); } + + + /// CreateWhileReadAfterWriteMask : While free of read-after-write conflicts + + /// + /// svbool_t svwhilerw[_s8](const int8_t *op1, const int8_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(sbyte* left, sbyte* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilerw[_s16](const int16_t *op1, const int16_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(short* left, short* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilerw[_s32](const int32_t *op1, const int32_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(int* left, int* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilerw[_s64](const int64_t *op1, const int64_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(long* left, long* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilerw[_u8](const uint8_t *op1, const uint8_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(byte* left, byte* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilerw[_u16](const uint16_t *op1, const uint16_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(ushort* left, ushort* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilerw[_u32](const uint32_t *op1, const uint32_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(uint* left, uint* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilerw[_u64](const uint64_t *op1, const uint64_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(ulong* left, ulong* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilerw[_f32](const float32_t *op1, const float32_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(float* left, float* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilerw[_f64](const float64_t *op1, const float64_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(double* left, double* right) { throw new PlatformNotSupportedException(); } + + + /// CreateWhileWriteAfterReadMask : While free of write-after-read conflicts + + /// + /// svbool_t svwhilewr[_s8](const int8_t *op1, const int8_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(sbyte* left, sbyte* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilewr[_s16](const int16_t *op1, const int16_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(short* left, short* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilewr[_s32](const int32_t *op1, const int32_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(int* left, int* right) { throw new PlatformNotSupportedException(); } + + /// + /// 
svbool_t svwhilewr[_s64](const int64_t *op1, const int64_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(long* left, long* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilewr[_u8](const uint8_t *op1, const uint8_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(byte* left, byte* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilewr[_u16](const uint16_t *op1, const uint16_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(ushort* left, ushort* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilewr[_u32](const uint32_t *op1, const uint32_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(uint* left, uint* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilewr[_u64](const uint64_t *op1, const uint64_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(ulong* left, ulong* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilewr[_f32](const float32_t *op1, const float32_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(float* left, float* right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svwhilewr[_f64](const float64_t *op1, const float64_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(double* left, double* right) { throw new PlatformNotSupportedException(); } + + + /// DotProductComplex : Complex dot product + + /// + /// svint32_t svcdot[_s32](svint32_t op1, svint8_t op2, svint8_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svcdot_lane[_s32](svint32_t op1, svint8_t op2, svint8_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svcdot[_s64](svint64_t op1, svint16_t op2, svint16_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svcdot_lane[_s64](svint64_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + + /// DownConvertNarrowingUpper : Down convert and narrow (top) + + /// + /// svfloat32_t svcvtnt_f32[_f64]_m(svfloat32_t even, svbool_t pg, svfloat64_t op) + /// svfloat32_t svcvtnt_f32[_f64]_x(svfloat32_t even, svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector DownConvertNarrowingUpper(Vector value) { throw new PlatformNotSupportedException(); } + + + /// DownConvertRoundingOdd : Down convert, rounding to odd + + /// + /// svfloat32_t svcvtx_f32[_f64]_m(svfloat32_t inactive, svbool_t pg, svfloat64_t op) + /// svfloat32_t svcvtx_f32[_f64]_x(svbool_t pg, svfloat64_t op) + /// svfloat32_t svcvtx_f32[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector DownConvertRoundingOdd(Vector value) { throw new PlatformNotSupportedException(); } + + + 
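// Note: svcvtx (DownConvertRoundingOdd above) narrows f64 to f32 with round-to-odd
// specifically to avoid double rounding when the result is rounded again later
// (e.g. f64 -> f32 -> f16): truncate, then force the mantissa LSB to 1 if any
// discarded bits were set. A minimal scalar sketch of that semantic, as a
// hypothetical helper that ignores NaN/infinity, overflow and subnormal handling:
//
//     static float ConvertToSingleRoundingOdd(double d)
//     {
//         long bits = BitConverter.DoubleToInt64Bits(d);
//         // double has 52 fraction bits, float has 23, so 29 bits are discarded.
//         long discarded = bits & ((1L << 29) - 1);
//         // Clearing the low bits truncates toward zero and makes the cast exact.
//         float f = (float)BitConverter.Int64BitsToDouble(bits & ~((1L << 29) - 1));
//         int fbits = BitConverter.SingleToInt32Bits(f);
//         if (discarded != 0) fbits |= 1;   // sticky bit keeps the inexactness visible
//         return BitConverter.Int32BitsToSingle(fbits);
//     }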
/// DownConvertRoundingOddUpper : Down convert, rounding to odd (top) + + /// + /// svfloat32_t svcvtxnt_f32[_f64]_m(svfloat32_t even, svbool_t pg, svfloat64_t op) + /// svfloat32_t svcvtxnt_f32[_f64]_x(svfloat32_t even, svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector DownConvertRoundingOddUpper(Vector value) { throw new PlatformNotSupportedException(); } + + + /// GatherVectorByteZeroExtendNonTemporal : Load 8-bit data and zero-extend, non-temporal + + /// + /// svint32_t svldnt1ub_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svldnt1ub_gather_[u32]offset_s32(svbool_t pg, const uint8_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1ub_gather_[s64]offset_s64(svbool_t pg, const uint8_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1ub_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1ub_gather_[u64]offset_s64(svbool_t pg, const uint8_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldnt1ub_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldnt1ub_gather_[u32]offset_u32(svbool_t pg, const uint8_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1ub_gather_[s64]offset_u64(svbool_t pg, const uint8_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1ub_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1ub_gather_[u64]offset_u64(svbool_t pg, const uint8_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + + /// GatherVectorInt16SignExtendNonTemporal : Load 16-bit data and sign-extend, non-temporal + + /// + /// svint32_t svldnt1sh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorInt16SignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sh_gather_[s64]index_s64(svbool_t pg, const int16_t *base, svint64_t indices) + /// + public static 
unsafe Vector<long> GatherVectorInt16SignExtendNonTemporal(Vector<long> mask, short* address, Vector<long> indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector<long> GatherVectorInt16SignExtendNonTemporal(Vector<long> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sh_gather_[u64]index_s64(svbool_t pg, const int16_t *base, svuint64_t indices) + /// + public static unsafe Vector<long> GatherVectorInt16SignExtendNonTemporal(Vector<long> mask, short* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldnt1sh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector<uint> GatherVectorInt16SignExtendNonTemporal(Vector<uint> mask, Vector<uint> addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sh_gather_[s64]index_u64(svbool_t pg, const int16_t *base, svint64_t indices) + /// + public static unsafe Vector<ulong> GatherVectorInt16SignExtendNonTemporal(Vector<ulong> mask, short* address, Vector<long> indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector<ulong> GatherVectorInt16SignExtendNonTemporal(Vector<ulong> mask, Vector<ulong> addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sh_gather_[u64]index_u64(svbool_t pg, const int16_t *base, svuint64_t indices) + /// + public static unsafe Vector<ulong> GatherVectorInt16SignExtendNonTemporal(Vector<ulong> mask, short* address, Vector<ulong> indices) { throw new PlatformNotSupportedException(); } + + + /// GatherVectorInt16WithByteOffsetsSignExtendNonTemporal : Load 16-bit data and sign-extend, non-temporal + + /// + /// svint32_t svldnt1sh_gather_[u32]offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets) + /// + public static unsafe Vector<int> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<int> mask, short* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sh_gather_[s64]offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets) + /// + public static unsafe Vector<long> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<long> mask, short* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sh_gather_[u64]offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets) + /// + public static unsafe Vector<long> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<long> mask, short* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldnt1sh_gather_[u32]offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets) + /// + public static unsafe Vector<uint> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<uint> mask, short* address, Vector<uint> offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sh_gather_[s64]offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets) + /// + public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<ulong> mask, short* address, Vector<long> offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sh_gather_[u64]offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets) + /// + public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<ulong> mask, short* address, Vector<ulong> offsets) { throw new PlatformNotSupportedException(); }
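// Note the two addressing forms above: the "WithByteOffsets" overloads
// (svldnt1sh_gather_...offset_...) treat each vector element as a raw byte
// offset from `address`, while the plain overloads taking `indices` scale each
// element by sizeof(int16_t) before adding it. A minimal per-lane sketch of the
// difference, using hypothetical helper names that are not part of this API:
//
//     static unsafe long LoadLaneByByteOffset(short* address, long byteOffset)
//         => *(short*)((byte*)address + byteOffset);   // offset form: no scaling
//
//     static unsafe long LoadLaneByIndex(short* address, long index)
//         => address[index];                           // index form: byte offset = index * 2
//
// In both forms the loaded 16-bit value is sign-extended into the wider result
// lane, and lanes whose mask element is false yield zero.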
+ + + /// GatherVectorInt32SignExtendNonTemporal : Load 32-bit data and sign-extend, non-temporal + + /// + /// svint64_t svldnt1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) { throw new PlatformNotSupportedException(); } + + + /// GatherVectorInt32WithByteOffsetsSignExtendNonTemporal : Load 32-bit data and sign-extend, non-temporal + + /// + /// svint64_t svldnt1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector 
GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + + /// GatherVectorNonTemporal : Unextended load, non-temporal + + /// + /// svint32_t svldnt1_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svldnt1_gather_[u32]offset[_s32](svbool_t pg, const int32_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, int* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1_gather_[s64]offset[_s64](svbool_t pg, const int64_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, long* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1_gather_[u64]offset[_s64](svbool_t pg, const int64_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, long* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1_gather_[s64]index[_s64](svbool_t pg, const 
int64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, long* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1_gather_[u64]index[_s64](svbool_t pg, const int64_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, long* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldnt1_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldnt1_gather_[u32]offset[_u32](svbool_t pg, const uint32_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1_gather_[s64]offset[_u64](svbool_t pg, const uint64_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, ulong* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1_gather_[u64]offset[_u64](svbool_t pg, const uint64_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, ulong* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1_gather_[s64]index[_u64](svbool_t pg, const uint64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, ulong* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1_gather_[u64]index[_u64](svbool_t pg, const uint64_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, ulong* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svldnt1_gather[_u32base]_f32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svldnt1_gather_[u32]offset[_f32](svbool_t pg, const float32_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, float* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svldnt1_gather_[s64]offset[_f64](svbool_t pg, const float64_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, double* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svldnt1_gather_[s64]index[_f64](svbool_t pg, const float64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, double* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svldnt1_gather[_u64base]_f64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svldnt1_gather_[u64]offset[_f64](svbool_t pg, const float64_t *base, 
svuint64_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, double* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svldnt1_gather_[u64]index[_f64](svbool_t pg, const float64_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, double* address, Vector indices) { throw new PlatformNotSupportedException(); } + + + /// GatherVectorSByteSignExtendNonTemporal : Load 8-bit data and sign-extend, non-temporal + + /// + /// svint32_t svldnt1sb_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svldnt1sb_gather_[u32]offset_s32(svbool_t pg, const int8_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sb_gather_[s64]offset_s64(svbool_t pg, const int8_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sb_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1sb_gather_[u64]offset_s64(svbool_t pg, const int8_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldnt1sb_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldnt1sb_gather_[u32]offset_u32(svbool_t pg, const int8_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sb_gather_[s64]offset_u64(svbool_t pg, const int8_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sb_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1sb_gather_[u64]offset_u64(svbool_t pg, const int8_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + + /// GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal : Load 16-bit data and zero-extend, non-temporal + + /// + /// svint32_t svldnt1uh_gather_[u32]offset_s32(svbool_t pg, const uint16_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets) { throw new 
PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1uh_gather_[s64]offset_s64(svbool_t pg, const uint16_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1uh_gather_[u64]offset_s64(svbool_t pg, const uint16_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldnt1uh_gather_[u32]offset_u32(svbool_t pg, const uint16_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uh_gather_[s64]offset_u64(svbool_t pg, const uint16_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uh_gather_[u64]offset_u64(svbool_t pg, const uint16_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + + /// GatherVectorUInt16ZeroExtendNonTemporal : Load 16-bit data and zero-extend, non-temporal + + /// + /// svint32_t svldnt1uh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1uh_gather_[s64]index_s64(svbool_t pg, const uint16_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, ushort* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1uh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1uh_gather_[u64]index_s64(svbool_t pg, const uint16_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, ushort* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svldnt1uh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uh_gather_[s64]index_u64(svbool_t pg, const uint16_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, ushort* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uh_gather_[u64]index_u64(svbool_t pg, const uint16_t *base, svuint64_t indices) + /// + public static 
unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, ushort* address, Vector indices) { throw new PlatformNotSupportedException(); } + + + /// GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal : Load 32-bit data and zero-extend, non-temporal + + /// + /// svint64_t svldnt1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) { throw new PlatformNotSupportedException(); } + + + /// GatherVectorUInt32ZeroExtendNonTemporal : Load 32-bit data and zero-extend, non-temporal + + /// + /// svint64_t svldnt1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t 
svldnt1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svldnt1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svldnt1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) { throw new PlatformNotSupportedException(); } + + + /// HalvingAdd : Halving add + + /// + /// svint8_t svhadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svhadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svhadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svhadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svhadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svhadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svhadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svhadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svhadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svhadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t 
svhadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svhadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svhadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svhadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svhadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svhadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svhadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svhadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svhadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svhadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svhadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svhadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svhadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svhadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// HalvingSubtract : Halving subtract + + /// + /// svint8_t svhsub[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svhsub[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svhsub[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svhsub[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svhsub[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svhsub[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svhsub[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svhsub[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svhsub[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svhsub[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svhsub[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svhsub[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svhsub[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svhsub[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svhsub[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svhsub[_u16]_m(svbool_t pg, svuint16_t 
op1, svuint16_t op2) + /// svuint16_t svhsub[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svhsub[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svhsub[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svhsub[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svhsub[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svhsub[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svhsub[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svhsub[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// HalvingSubtractReversed : Halving subtract reversed + + /// + /// svint8_t svhsubr[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svhsubr[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svhsubr[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svhsubr[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svhsubr[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svhsubr[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svhsubr[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svhsubr[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svhsubr[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svhsubr[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svhsubr[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svhsubr[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svhsubr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svhsubr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svhsubr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svhsubr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svhsubr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svhsubr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svhsubr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svhsubr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svhsubr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static 
unsafe Vector HalvingSubtractReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svhsubr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svhsubr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svhsubr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// InterleavingXorLowerUpper : Interleaving exclusive OR (bottom, top) + + /// + /// svint8_t sveorbt[_s8](svint8_t odd, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t sveorbt[_s16](svint16_t odd, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t sveorbt[_s32](svint32_t odd, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t sveorbt[_s64](svint64_t odd, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t sveorbt[_u8](svuint8_t odd, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t sveorbt[_u16](svuint16_t odd, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t sveorbt[_u32](svuint32_t odd, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t sveorbt[_u64](svuint64_t odd, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// InterleavingXorUpperLower : Interleaving exclusive OR (top, bottom) + + /// + /// svint8_t sveortb[_s8](svint8_t even, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t sveortb[_s16](svint16_t even, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t sveortb[_s32](svint32_t even, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t sveortb[_s64](svint64_t even, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t sveortb[_u8](svuint8_t even, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector 
InterleavingXorUpperLower(Vector even, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t sveortb[_u16](svuint16_t even, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t sveortb[_u32](svuint32_t even, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t sveortb[_u64](svuint64_t even, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// Log2 : Base 2 logarithm as integer + + /// + /// svint32_t svlogb[_f32]_m(svint32_t inactive, svbool_t pg, svfloat32_t op) + /// svint32_t svlogb[_f32]_x(svbool_t pg, svfloat32_t op) + /// svint32_t svlogb[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector Log2(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svlogb[_f64]_m(svint64_t inactive, svbool_t pg, svfloat64_t op) + /// svint64_t svlogb[_f64]_x(svbool_t pg, svfloat64_t op) + /// svint64_t svlogb[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector Log2(Vector value) { throw new PlatformNotSupportedException(); } + + + /// Match : Detect any matching elements + + /// + /// svbool_t svmatch[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector Match(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svmatch[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector Match(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svmatch[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector Match(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svmatch[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector Match(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// MaxNumberPairwise : Maximum number pairwise + + /// + /// svfloat32_t svmaxnmp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svmaxnmp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector MaxNumberPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svmaxnmp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svmaxnmp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector MaxNumberPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// MaxPairwise : Maximum pairwise + + /// + /// svint8_t svmaxp[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svmaxp[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svmaxp[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svmaxp[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector MaxPairwise(Vector left, 
Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmaxp[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svmaxp[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmaxp[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svmaxp[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svmaxp[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svmaxp[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svmaxp[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svmaxp[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmaxp[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svmaxp[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmaxp[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svmaxp[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svmaxp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svmaxp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svmaxp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svmaxp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// MinNumberPairwise : Minimum number pairwise + + /// + /// svfloat32_t svminnmp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svminnmp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector MinNumberPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svminnmp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svminnmp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector MinNumberPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// MinPairwise : Minimum pairwise + + /// + /// svint8_t svminp[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svminp[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svminp[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svminp[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) { throw new 
PlatformNotSupportedException(); } + + /// + /// svint32_t svminp[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svminp[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svminp[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svminp[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svminp[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svminp[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svminp[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svminp[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svminp[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svminp[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svminp[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svminp[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svminp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svminp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svminp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svminp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// MoveWideningLower : Move long (bottom) + + /// + /// svint16_t svmovlb[_s16](svint8_t op) + /// + public static unsafe Vector MoveWideningLower(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmovlb[_s32](svint16_t op) + /// + public static unsafe Vector MoveWideningLower(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmovlb[_s64](svint32_t op) + /// + public static unsafe Vector MoveWideningLower(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svmovlb[_u16](svuint8_t op) + /// + public static unsafe Vector MoveWideningLower(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmovlb[_u32](svuint16_t op) + /// + public static unsafe Vector MoveWideningLower(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmovlb[_u64](svuint32_t op) + /// + public static unsafe Vector MoveWideningLower(Vector value) { throw new PlatformNotSupportedException(); } + + + /// MoveWideningUpper : Move long (top) + + /// + /// svint16_t svmovlt[_s16](svint8_t op) + /// + public static unsafe Vector MoveWideningUpper(Vector value) { throw new 
PlatformNotSupportedException(); } + + /// + /// svint32_t svmovlt[_s32](svint16_t op) + /// + public static unsafe Vector MoveWideningUpper(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmovlt[_s64](svint32_t op) + /// + public static unsafe Vector MoveWideningUpper(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svmovlt[_u16](svuint8_t op) + /// + public static unsafe Vector MoveWideningUpper(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmovlt[_u32](svuint16_t op) + /// + public static unsafe Vector MoveWideningUpper(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmovlt[_u64](svuint32_t op) + /// + public static unsafe Vector MoveWideningUpper(Vector value) { throw new PlatformNotSupportedException(); } + + + /// MultiplyAddBySelectedScalar : Multiply-add, addend first + + /// + /// svint16_t svmla_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmla_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmla_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svmla_lane[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmla_lane[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmla_lane[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + + /// MultiplyAddRotateComplex : Complex multiply-add with rotate + + /// + /// svint8_t svcmla[_s8](svint8_t op1, svint8_t op2, svint8_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svcmla[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svcmla[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector 
left, Vector right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svcmla[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svcmla[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svcmla[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svcmla[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svcmla[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + + /// MultiplyAddRotateComplexBySelectedScalar : Complex multiply-add with rotate + + /// + /// svint16_t svcmla_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svcmla_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svcmla_lane[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svcmla_lane[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + + /// MultiplyAddWideningLower : Multiply-add long (bottom) + + /// + /// svint16_t svmlalb[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmlalb[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector MultiplyAddWideningLower(Vector 
op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmlalb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmlalb[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmlalb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svmlalb[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmlalb[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmlalb_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmlalb[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmlalb_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// MultiplyAddWideningUpper : Multiply-add long (top) + + /// + /// svint16_t svmlalt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmlalt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmlalt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmlalt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmlalt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svmlalt[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) { throw new 
PlatformNotSupportedException(); } + + /// + /// svuint32_t svmlalt[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmlalt_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmlalt[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmlalt_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// MultiplyBySelectedScalar : Multiply + + /// + /// svint16_t svmul_lane[_s16](svint16_t op1, svint16_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmul_lane[_s32](svint32_t op1, svint32_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmul_lane[_s64](svint64_t op1, svint64_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svmul_lane[_u16](svuint16_t op1, svuint16_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmul_lane[_u32](svuint32_t op1, svuint32_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmul_lane[_u64](svuint64_t op1, svuint64_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + + /// MultiplySubtractBySelectedScalar : Multiply-subtract, minuend first + + /// + /// svint16_t svmls_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmls_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmls_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, 
Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svmls_lane[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmls_lane[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmls_lane[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + + /// MultiplySubtractWideningLower : Multiply-subtract long (bottom) + + /// + /// svint16_t svmlslb[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmlslb[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmlslb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmlslb[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmlslb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svmlslb[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmlslb[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmlslb_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmlslb[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmlslb_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new 
PlatformNotSupportedException(); } + + + /// MultiplySubtractWideningUpper : Multiply-subtract long (top) + + /// + /// svint16_t svmlslt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmlslt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmlslt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmlslt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmlslt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svmlslt[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmlslt[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmlslt_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmlslt[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmlslt_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// MultiplyWideningLower : Multiply long (bottom) + + /// + /// svint16_t svmullb[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmullb[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmullb_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyWideningLower(Vector op1, Vector op2, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmullb[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t 
svmullb_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyWideningLower(Vector op1, Vector op2, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svmullb[_u16](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmullb[_u32](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmullb_lane[_u32](svuint16_t op1, svuint16_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyWideningLower(Vector op1, Vector op2, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmullb[_u64](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmullb_lane[_u64](svuint32_t op1, svuint32_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyWideningLower(Vector op1, Vector op2, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// MultiplyWideningUpper : Multiply long (top) + + /// + /// svint16_t svmullt[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmullt[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svmullt_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmullt[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svmullt_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svmullt[_u16](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmullt[_u32](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmullt_lane[_u32](svuint16_t op1, svuint16_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmullt[_u64](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svmullt_lane[_u64](svuint32_t op1, svuint32_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// NoMatch : Detect 
no matching elements
+
+        /// <summary>
+        /// svbool_t svnmatch[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> NoMatch(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svnmatch[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+        /// </summary>
+        public static unsafe Vector<short> NoMatch(Vector<short> mask, Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svnmatch[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> NoMatch(Vector<byte> mask, Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svbool_t svnmatch[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> NoMatch(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+
+        /// PolynomialMultiply : Polynomial multiply
+
+        /// <summary>
+        /// svuint8_t svpmul[_u8](svuint8_t op1, svuint8_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> PolynomialMultiply(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+
+        /// PolynomialMultiplyWideningLower : Polynomial multiply long (bottom)
+
+        /// <summary>
+        /// svuint8_t svpmullb_pair[_u8](svuint8_t op1, svuint8_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> PolynomialMultiplyWideningLower(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint16_t svpmullb[_u16](svuint8_t op1, svuint8_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> PolynomialMultiplyWideningLower(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svpmullb_pair[_u32](svuint32_t op1, svuint32_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> PolynomialMultiplyWideningLower(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint64_t svpmullb[_u64](svuint32_t op1, svuint32_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> PolynomialMultiplyWideningLower(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+
+        /// PolynomialMultiplyWideningUpper : Polynomial multiply long (top)
+
+        /// <summary>
+        /// svuint8_t svpmullt_pair[_u8](svuint8_t op1, svuint8_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> PolynomialMultiplyWideningUpper(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint16_t svpmullt[_u16](svuint8_t op1, svuint8_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> PolynomialMultiplyWideningUpper(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svpmullt_pair[_u32](svuint32_t op1, svuint32_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> PolynomialMultiplyWideningUpper(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint64_t svpmullt[_u64](svuint32_t op1, svuint32_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> PolynomialMultiplyWideningUpper(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+
+        /// ReciprocalEstimate : Reciprocal estimate
+
+        /// <summary>
+        /// svuint32_t svrecpe[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+        /// svuint32_t svrecpe[_u32]_x(svbool_t pg, svuint32_t op)
+        /// svuint32_t svrecpe[_u32]_z(svbool_t pg, svuint32_t op)
+        /// </summary>
+        public static unsafe Vector<uint> ReciprocalEstimate(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+
+
+        /// ReciprocalSqrtEstimate : Reciprocal square root estimate
+
+        /// <summary>
+        /// svuint32_t svrsqrte[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+        /// svuint32_t svrsqrte[_u32]_x(svbool_t pg, svuint32_t op)
+        /// svuint32_t svrsqrte[_u32]_z(svbool_t pg, svuint32_t op)
+        /// </summary>
+        public static unsafe Vector<uint> ReciprocalSqrtEstimate(Vector<uint> value) { throw new PlatformNotSupportedException(); }
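+
+        // Editorial note: the helper below is an illustrative sketch only and is
+        // not part of the generated surface; the name is made up for this example.
+        // It shows the carry-less (polynomial) multiply semantics assumed by
+        // PolynomialMultiply above: partial products are combined with XOR rather
+        // than addition, and PMUL keeps the low half of the widened product.
+        private static byte PolynomialMultiplyScalarSketch(byte left, byte right)
+        {
+            int product = 0;
+            for (int bit = 0; bit < 8; bit++)
+            {
+                if ((right & (1 << bit)) != 0)
+                {
+                    product ^= left << bit; // XOR-accumulate: no carries in GF(2)
+                }
+            }
+            return (byte)product; // keep the low 8 bits of the 16-bit result
+        }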
+
+
+        /// RoundingAddHighNarrowingLower : Rounding add narrow high part (bottom)
+
+        /// <summary>
+        /// svint8_t svraddhnb[_s16](svint16_t op1, svint16_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> RoundingAddHighNarrowingLower(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint16_t svraddhnb[_s32](svint32_t op1, svint32_t op2)
+        /// </summary>
+        public static unsafe Vector<short> RoundingAddHighNarrowingLower(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svraddhnb[_s64](svint64_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<int> RoundingAddHighNarrowingLower(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint8_t svraddhnb[_u16](svuint16_t op1, svuint16_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> RoundingAddHighNarrowingLower(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint16_t svraddhnb[_u32](svuint32_t op1, svuint32_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> RoundingAddHighNarrowingLower(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svraddhnb[_u64](svuint64_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> RoundingAddHighNarrowingLower(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+
+        /// RoundingAddHighNarrowingUpper : Rounding add narrow high part (top)
+
+        /// <summary>
+        /// svint8_t svraddhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> RoundingAddHighNarrowingUpper(Vector<sbyte> even, Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint16_t svraddhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2)
+        /// </summary>
+        public static unsafe Vector<short> RoundingAddHighNarrowingUpper(Vector<short> even, Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svraddhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<int> RoundingAddHighNarrowingUpper(Vector<int> even, Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint8_t svraddhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> RoundingAddHighNarrowingUpper(Vector<byte> even, Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint16_t svraddhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> RoundingAddHighNarrowingUpper(Vector<ushort> even, Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svraddhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> RoundingAddHighNarrowingUpper(Vector<uint> even, Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+
+        /// RoundingHalvingAdd : Rounding halving add
+
+        /// <summary>
+        /// svint8_t svrhadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+        /// svint8_t svrhadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+        /// svint8_t svrhadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> RoundingHalvingAdd(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint16_t svrhadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+        /// svint16_t svrhadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+        /// svint16_t svrhadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+        /// </summary>
+        public static unsafe Vector<short> RoundingHalvingAdd(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svrhadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+        /// svint32_t svrhadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+        /// svint32_t svrhadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+        /// </summary>
+        public static unsafe Vector<int> RoundingHalvingAdd(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svrhadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+        /// svint64_t svrhadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+        /// svint64_t svrhadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<long> RoundingHalvingAdd(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint8_t svrhadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+        /// svuint8_t svrhadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+        /// svuint8_t svrhadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> RoundingHalvingAdd(Vector<byte> left, Vector<byte> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint16_t svrhadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+        /// svuint16_t svrhadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+        /// svuint16_t svrhadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> RoundingHalvingAdd(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svrhadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+        /// svuint32_t svrhadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+        /// svuint32_t svrhadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> RoundingHalvingAdd(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint64_t svrhadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+        /// svuint64_t svrhadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+        /// svuint64_t svrhadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<ulong> RoundingHalvingAdd(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+
+        /// RoundingSubtractHighNarrowingLower : Rounding subtract narrow high part (bottom)
+
+        /// <summary>
+        /// svint8_t svrsubhnb[_s16](svint16_t op1, svint16_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> RoundingSubtractHighNarrowingLower(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint16_t svrsubhnb[_s32](svint32_t op1, svint32_t op2)
+        /// </summary>
+        public static unsafe Vector<short> RoundingSubtractHighNarrowingLower(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svrsubhnb[_s64](svint64_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<int> RoundingSubtractHighNarrowingLower(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint8_t svrsubhnb[_u16](svuint16_t op1, svuint16_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> RoundingSubtractHighNarrowingLower(Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint16_t svrsubhnb[_u32](svuint32_t op1, svuint32_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> RoundingSubtractHighNarrowingLower(Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svrsubhnb[_u64](svuint64_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> RoundingSubtractHighNarrowingLower(Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+
+        /// RoundingSubtractHighNarrowingUpper : Rounding subtract narrow high part (top)
+
+        /// <summary>
+        /// svint8_t svrsubhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> RoundingSubtractHighNarrowingUpper(Vector<sbyte> even, Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint16_t svrsubhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2)
+        /// </summary>
+        public static unsafe Vector<short> RoundingSubtractHighNarrowingUpper(Vector<short> even, Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svrsubhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<int> RoundingSubtractHighNarrowingUpper(Vector<int> even, Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint8_t svrsubhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2)
+        /// </summary>
+        public static unsafe Vector<byte> RoundingSubtractHighNarrowingUpper(Vector<byte> even, Vector<ushort> left, Vector<ushort> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint16_t svrsubhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2)
+        /// </summary>
+        public static unsafe Vector<ushort> RoundingSubtractHighNarrowingUpper(Vector<ushort> even, Vector<uint> left, Vector<uint> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svuint32_t svrsubhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2)
+        /// </summary>
+        public static unsafe Vector<uint> RoundingSubtractHighNarrowingUpper(Vector<uint> even, Vector<ulong> left, Vector<ulong> right) { throw new PlatformNotSupportedException(); }
+
+
+        /// SaturatingAbs : Saturating absolute value
+
+        /// <summary>
+        /// svint8_t svqabs[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op)
+        /// svint8_t svqabs[_s8]_x(svbool_t pg, svint8_t op)
+        /// svint8_t svqabs[_s8]_z(svbool_t pg, svint8_t op)
+        /// </summary>
+        public static unsafe Vector<sbyte> SaturatingAbs(Vector<sbyte> value) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint16_t svqabs[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+        /// svint16_t svqabs[_s16]_x(svbool_t pg, svint16_t op)
+        /// svint16_t svqabs[_s16]_z(svbool_t pg, svint16_t op)
+        /// </summary>
+        public static unsafe Vector<short> SaturatingAbs(Vector<short> value) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svqabs[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+        /// svint32_t svqabs[_s32]_x(svbool_t pg, svint32_t op)
+        /// svint32_t svqabs[_s32]_z(svbool_t pg, svint32_t op)
+        /// </summary>
+        public static unsafe Vector<int> SaturatingAbs(Vector<int> value) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svqabs[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+        /// svint64_t svqabs[_s64]_x(svbool_t pg, svint64_t op)
+        /// svint64_t svqabs[_s64]_z(svbool_t pg, svint64_t op)
+        /// </summary>
+        public static unsafe Vector<long> SaturatingAbs(Vector<long> value) { throw new PlatformNotSupportedException(); }
+
+
+        /// SaturatingComplexAddRotate : Saturating complex add with rotate
+
+        /// <summary>
+        /// svint8_t svqcadd[_s8](svint8_t op1, svint8_t op2, uint64_t imm_rotation)
+        /// </summary>
+        public static unsafe Vector<sbyte> SaturatingComplexAddRotate(Vector<sbyte> op1, Vector<sbyte> op2, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint16_t svqcadd[_s16](svint16_t op1, svint16_t op2, uint64_t imm_rotation)
+        /// </summary>
+        public static unsafe Vector<short> SaturatingComplexAddRotate(Vector<short> op1, Vector<short> op2, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); }
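+
+        // Editorial note: illustrative scalar sketches only, not part of the
+        // generated surface; the helper names are made up for this example.
+        // Assuming the architectural definitions, rounding halving add is
+        // (op1 + op2 + 1) >> 1 with no intermediate overflow, and rounding
+        // add-high-narrowing adds in a wider type, rounds, and keeps the high
+        // (narrowed) half of each sum.
+        private static sbyte RoundingHalvingAddScalarSketch(sbyte left, sbyte right)
+        {
+            // The addition promotes to int, so it cannot overflow.
+            return (sbyte)((left + right + 1) >> 1);
+        }
+
+        private static sbyte RoundingAddHighNarrowingScalarSketch(short left, short right)
+        {
+            // Round at bit 7, then keep bits 15:8 of the 16-bit sum.
+            return (sbyte)((left + right + (1 << 7)) >> 8);
+        }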
+
+        /// <summary>
+        /// svint32_t svqcadd[_s32](svint32_t op1, svint32_t op2, uint64_t imm_rotation)
+        /// </summary>
+        public static unsafe Vector<int> SaturatingComplexAddRotate(Vector<int> op1, Vector<int> op2, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svqcadd[_s64](svint64_t op1, svint64_t op2, uint64_t imm_rotation)
+        /// </summary>
+        public static unsafe Vector<long> SaturatingComplexAddRotate(Vector<long> op1, Vector<long> op2, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); }
+
+
+        /// SaturatingDoublingMultiplyAddWideningLower : Saturating doubling multiply-add long (bottom)
+
+        /// <summary>
+        /// svint16_t svqdmlalb[_s16](svint16_t op1, svint8_t op2, svint8_t op3)
+        /// </summary>
+        public static unsafe Vector<short> SaturatingDoublingMultiplyAddWideningLower(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svqdmlalb[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+        /// </summary>
+        public static unsafe Vector<int> SaturatingDoublingMultiplyAddWideningLower(Vector<int> op1, Vector<short> op2, Vector<short> op3) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svqdmlalb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+        /// </summary>
+        public static unsafe Vector<int> SaturatingDoublingMultiplyAddWideningLower(Vector<int> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svqdmlalb[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+        /// </summary>
+        public static unsafe Vector<long> SaturatingDoublingMultiplyAddWideningLower(Vector<long> op1, Vector<int> op2, Vector<int> op3) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svqdmlalb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+        /// </summary>
+        public static unsafe Vector<long> SaturatingDoublingMultiplyAddWideningLower(Vector<long> op1, Vector<int> op2, Vector<int> op3, ulong imm_index) { throw new PlatformNotSupportedException(); }
+
+
+        /// SaturatingDoublingMultiplyAddWideningLowerUpper : Saturating doubling multiply-add long (bottom × top)
+
+        /// <summary>
+        /// svint16_t svqdmlalbt[_s16](svint16_t op1, svint8_t op2, svint8_t op3)
+        /// </summary>
+        public static unsafe Vector<short> SaturatingDoublingMultiplyAddWideningLowerUpper(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svqdmlalbt[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+        /// </summary>
+        public static unsafe Vector<int> SaturatingDoublingMultiplyAddWideningLowerUpper(Vector<int> op1, Vector<short> op2, Vector<short> op3) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svqdmlalbt[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+        /// </summary>
+        public static unsafe Vector<long> SaturatingDoublingMultiplyAddWideningLowerUpper(Vector<long> op1, Vector<int> op2, Vector<int> op3) { throw new PlatformNotSupportedException(); }
+
+
+        /// SaturatingDoublingMultiplyAddWideningUpper : Saturating doubling multiply-add long (top)
+
+        /// <summary>
+        /// svint16_t svqdmlalt[_s16](svint16_t op1, svint8_t op2, svint8_t op3)
+        /// </summary>
+        public static unsafe Vector<short> SaturatingDoublingMultiplyAddWideningUpper(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svqdmlalt[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+        /// </summary>
+        public static unsafe Vector<int> SaturatingDoublingMultiplyAddWideningUpper(Vector<int> op1, Vector<short> op2, Vector<short> op3) { throw new PlatformNotSupportedException(); }
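+
+        // Editorial note: illustrative scalar sketch only, not part of the
+        // generated surface; the helper name is made up for this example.
+        // Assuming SQDMLAL-style semantics for the widening overloads above:
+        // the doubled, widened product saturates to the wide element range,
+        // and the accumulate then saturates again.
+        private static short SaturatingDoublingMultiplyAddWideningScalarSketch(short op1, sbyte op2, sbyte op3)
+        {
+            // Widen, double, and saturate the product (e.g. 2 * -128 * -128).
+            int product = Math.Clamp(2 * op2 * op3, short.MinValue, short.MaxValue);
+            // Saturating accumulate into the wide element.
+            return (short)Math.Clamp(op1 + product, short.MinValue, short.MaxValue);
+        }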
+
+        /// <summary>
+        /// svint32_t svqdmlalt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+        /// </summary>
+        public static unsafe Vector<int> SaturatingDoublingMultiplyAddWideningUpper(Vector<int> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svqdmlalt[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+        /// </summary>
+        public static unsafe Vector<long> SaturatingDoublingMultiplyAddWideningUpper(Vector<long> op1, Vector<int> op2, Vector<int> op3) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svqdmlalt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+        /// </summary>
+        public static unsafe Vector<long> SaturatingDoublingMultiplyAddWideningUpper(Vector<long> op1, Vector<int> op2, Vector<int> op3, ulong imm_index) { throw new PlatformNotSupportedException(); }
+
+
+        /// SaturatingDoublingMultiplyHigh : Saturating doubling multiply high
+
+        /// <summary>
+        /// svint8_t svqdmulh[_s8](svint8_t op1, svint8_t op2)
+        /// </summary>
+        public static unsafe Vector<sbyte> SaturatingDoublingMultiplyHigh(Vector<sbyte> left, Vector<sbyte> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint16_t svqdmulh[_s16](svint16_t op1, svint16_t op2)
+        /// </summary>
+        public static unsafe Vector<short> SaturatingDoublingMultiplyHigh(Vector<short> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint16_t svqdmulh_lane[_s16](svint16_t op1, svint16_t op2, uint64_t imm_index)
+        /// </summary>
+        public static unsafe Vector<short> SaturatingDoublingMultiplyHigh(Vector<short> op1, Vector<short> op2, ulong imm_index) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svqdmulh[_s32](svint32_t op1, svint32_t op2)
+        /// </summary>
+        public static unsafe Vector<int> SaturatingDoublingMultiplyHigh(Vector<int> left, Vector<int> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svqdmulh_lane[_s32](svint32_t op1, svint32_t op2, uint64_t imm_index)
+        /// </summary>
+        public static unsafe Vector<int> SaturatingDoublingMultiplyHigh(Vector<int> op1, Vector<int> op2, ulong imm_index) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svqdmulh[_s64](svint64_t op1, svint64_t op2)
+        /// </summary>
+        public static unsafe Vector<long> SaturatingDoublingMultiplyHigh(Vector<long> left, Vector<long> right) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svqdmulh_lane[_s64](svint64_t op1, svint64_t op2, uint64_t imm_index)
+        /// </summary>
+        public static unsafe Vector<long> SaturatingDoublingMultiplyHigh(Vector<long> op1, Vector<long> op2, ulong imm_index) { throw new PlatformNotSupportedException(); }
+
+
+        /// SaturatingDoublingMultiplySubtractWideningLower : Saturating doubling multiply-subtract long (bottom)
+
+        /// <summary>
+        /// svint16_t svqdmlslb[_s16](svint16_t op1, svint8_t op2, svint8_t op3)
+        /// </summary>
+        public static unsafe Vector<short> SaturatingDoublingMultiplySubtractWideningLower(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svqdmlslb[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+        /// </summary>
+        public static unsafe Vector<int> SaturatingDoublingMultiplySubtractWideningLower(Vector<int> op1, Vector<short> op2, Vector<short> op3) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint32_t svqdmlslb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+        /// </summary>
+        public static unsafe Vector<int> SaturatingDoublingMultiplySubtractWideningLower(Vector<int> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) { throw new PlatformNotSupportedException(); }
+
+        /// <summary>
+        /// svint64_t svqdmlslb[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+        /// </summary>
+        public static
unsafe Vector SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqdmlslb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// SaturatingDoublingMultiplySubtractWideningLowerUpper : Saturating doubling multiply-subtract long (bottom × top) + + /// + /// svint16_t svqdmlslbt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqdmlslbt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqdmlslbt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + + /// SaturatingDoublingMultiplySubtractWideningUpper : Saturating doubling multiply-subtract long (top) + + /// + /// svint16_t svqdmlslt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqdmlslt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqdmlslt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqdmlslt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqdmlslt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// SaturatingDoublingMultiplyWideningLower : Saturating doubling multiply long (bottom) + + /// + /// svint16_t svqdmullb[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqdmullb[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqdmullb_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index) + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector op1, Vector op2, ulong imm_index) { throw new 
PlatformNotSupportedException(); } + + /// + /// svint64_t svqdmullb[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqdmullb_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index) + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector op1, Vector op2, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// SaturatingDoublingMultiplyWideningUpper : Saturating doubling multiply long (top) + + /// + /// svint16_t svqdmullt[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqdmullt[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqdmullt_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index) + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqdmullt[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqdmullt_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index) + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// SaturatingExtractNarrowingLower : Saturating extract narrow (bottom) + + /// + /// svint8_t svqxtnb[_s16](svint16_t op) + /// + public static unsafe Vector SaturatingExtractNarrowingLower(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqxtnb[_s32](svint32_t op) + /// + public static unsafe Vector SaturatingExtractNarrowingLower(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqxtnb[_s64](svint64_t op) + /// + public static unsafe Vector SaturatingExtractNarrowingLower(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svqxtnb[_u16](svuint16_t op) + /// + public static unsafe Vector SaturatingExtractNarrowingLower(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqxtnb[_u32](svuint32_t op) + /// + public static unsafe Vector SaturatingExtractNarrowingLower(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqxtnb[_u64](svuint64_t op) + /// + public static unsafe Vector SaturatingExtractNarrowingLower(Vector value) { throw new PlatformNotSupportedException(); }
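+ // NOTE (illustrative sketch, not part of the generated API): the "bottom"
+ // narrowing form writes each saturated result to the even lanes of the
+ // destination, and the "top" form fills the odd lanes, merging with an
+ // existing vector. A scalar model for int -> short, assuming the ACLE
+ // semantics of svqxtnb/svqxtnt:
+ //
+ //     static short SaturateToInt16(long v)
+ //         => (short)Math.Clamp(v, short.MinValue, short.MaxValue);
+ //
+ //     static short[] NarrowBottomThenTop(int[] bottom, int[] top)
+ //     {
+ //         var narrowed = new short[bottom.Length * 2];
+ //         for (int i = 0; i < bottom.Length; i++)
+ //         {
+ //             narrowed[2 * i]     = SaturateToInt16(bottom[i]); // svqxtnb lane
+ //             narrowed[2 * i + 1] = SaturateToInt16(top[i]);    // svqxtnt lane
+ //         }
+ //         return narrowed;
+ //     }
+ + + /// SaturatingExtractNarrowingUpper : Saturating extract narrow (top) + + /// + /// svint8_t svqxtnt[_s16](svint8_t even, svint16_t op) + /// + public static unsafe Vector SaturatingExtractNarrowingUpper(Vector even, Vector op) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqxtnt[_s32](svint16_t even, svint32_t op) + /// + public static unsafe Vector SaturatingExtractNarrowingUpper(Vector even, Vector op) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqxtnt[_s64](svint32_t even, svint64_t op) + /// + public static unsafe Vector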
SaturatingExtractNarrowingUpper(Vector even, Vector op) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svqxtnt[_u16](svuint8_t even, svuint16_t op) + /// + public static unsafe Vector SaturatingExtractNarrowingUpper(Vector even, Vector op) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqxtnt[_u32](svuint16_t even, svuint32_t op) + /// + public static unsafe Vector SaturatingExtractNarrowingUpper(Vector even, Vector op) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqxtnt[_u64](svuint32_t even, svuint64_t op) + /// + public static unsafe Vector SaturatingExtractNarrowingUpper(Vector even, Vector op) { throw new PlatformNotSupportedException(); } + + + /// SaturatingExtractUnsignedNarrowingLower : Saturating extract unsigned narrow (bottom) + + /// + /// svuint8_t svqxtunb[_s16](svint16_t op) + /// + public static unsafe Vector SaturatingExtractUnsignedNarrowingLower(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqxtunb[_s32](svint32_t op) + /// + public static unsafe Vector SaturatingExtractUnsignedNarrowingLower(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqxtunb[_s64](svint64_t op) + /// + public static unsafe Vector SaturatingExtractUnsignedNarrowingLower(Vector value) { throw new PlatformNotSupportedException(); } + + + /// SaturatingExtractUnsignedNarrowingUpper : Saturating extract unsigned narrow (top) + + /// + /// svuint8_t svqxtunt[_s16](svuint8_t even, svint16_t op) + /// + public static unsafe Vector SaturatingExtractUnsignedNarrowingUpper(Vector even, Vector op) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqxtunt[_s32](svuint16_t even, svint32_t op) + /// + public static unsafe Vector SaturatingExtractUnsignedNarrowingUpper(Vector even, Vector op) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqxtunt[_s64](svuint32_t even, svint64_t op) + /// + public static unsafe Vector SaturatingExtractUnsignedNarrowingUpper(Vector even, Vector op) { throw new PlatformNotSupportedException(); } + + + /// SaturatingNegate : Saturating negate + + /// + /// svint8_t svqneg[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) + /// svint8_t svqneg[_s8]_x(svbool_t pg, svint8_t op) + /// svint8_t svqneg[_s8]_z(svbool_t pg, svint8_t op) + /// + public static unsafe Vector SaturatingNegate(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqneg[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// svint16_t svqneg[_s16]_x(svbool_t pg, svint16_t op) + /// svint16_t svqneg[_s16]_z(svbool_t pg, svint16_t op) + /// + public static unsafe Vector SaturatingNegate(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqneg[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// svint32_t svqneg[_s32]_x(svbool_t pg, svint32_t op) + /// svint32_t svqneg[_s32]_z(svbool_t pg, svint32_t op) + /// + public static unsafe Vector SaturatingNegate(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqneg[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// svint64_t svqneg[_s64]_x(svbool_t pg, svint64_t op) + /// svint64_t svqneg[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector SaturatingNegate(Vector value) { throw new PlatformNotSupportedException(); } + + + /// SaturatingRoundingDoublingComplexMultiplyAddHighRotate : Saturating rounding doubling complex 
multiply-add high with rotate + + /// + /// svint8_t svqrdcmlah[_s8](svint8_t op1, svint8_t op2, svint8_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqrdcmlah[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqrdcmlah_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqrdcmlah[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqrdcmlah_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqrdcmlah[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); } + + + /// SaturatingRoundingDoublingMultiplyAddHigh : Saturating rounding doubling multiply-add high + + /// + /// svint8_t svqrdmlah[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqrdmlah[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqrdmlah_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqrdmlah[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqrdmlah_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqrdmlah[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// + public static unsafe Vector 
SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqrdmlah_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// SaturatingRoundingDoublingMultiplyHigh : Saturating rounding doubling multiply high + + /// + /// svint8_t svqrdmulh[_s8](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqrdmulh[_s16](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqrdmulh_lane[_s16](svint16_t op1, svint16_t op2, uint64_t imm_index) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector op1, Vector op2, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqrdmulh[_s32](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqrdmulh_lane[_s32](svint32_t op1, svint32_t op2, uint64_t imm_index) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector op1, Vector op2, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqrdmulh[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqrdmulh_lane[_s64](svint64_t op1, svint64_t op2, uint64_t imm_index) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector op1, Vector op2, ulong imm_index) { throw new PlatformNotSupportedException(); }
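+ // NOTE (illustrative sketch, not part of the generated API): a scalar model
+ // of one 16-bit lane of SaturatingRoundingDoublingMultiplyHigh, assuming the
+ // ACLE definition sat((2 * op1 * op2 + (1 << 15)) >> 16):
+ //
+ //     static short RoundingDoublingMultiplyHigh16(short op1, short op2)
+ //     {
+ //         long doubled = 2L * op1 * op2;        // doubling product, widened to avoid overflow
+ //         long rounded = doubled + (1L << 15);  // rounding constant for the discarded half
+ //         long high    = rounded >> 16;         // keep the high half
+ //         return (short)Math.Clamp(high, short.MinValue, short.MaxValue); // saturate
+ //     }
+ + + /// SaturatingRoundingDoublingMultiplySubtractHigh : Saturating rounding doubling multiply-subtract high + + /// + /// svint8_t svqrdmlsh[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqrdmlsh[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqrdmlsh_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqrdmlsh[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqrdmlsh_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3, ulong imm_index) {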
throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqrdmlsh[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqrdmlsh_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index) + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// Scatter16BitNarrowing : Truncate to 16 bits and store, non-temporal + + /// + /// void svstnt1h_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1h_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1h_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1h_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + + /// Scatter16BitWithByteOffsetsNarrowing : Truncate to 16 bits and store, non-temporal + + /// + /// void svstnt1h_scatter_[u32]offset[_s32](svbool_t pg, int16_t *base, svuint32_t offsets, svint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1h_scatter_[s64]offset[_s64](svbool_t pg, int16_t *base, svint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1h_scatter_[u64]offset[_s64](svbool_t pg, int16_t *base, svuint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1h_scatter_[s64]index[_s64](svbool_t pg, int16_t *base, svint64_t indices, svint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1h_scatter_[u64]index[_s64](svbool_t pg, int16_t *base, svuint64_t indices, svint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1h_scatter_[u32]offset[_u32](svbool_t pg, uint16_t *base, svuint32_t offsets, svuint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void 
svstnt1h_scatter_[s64]offset[_u64](svbool_t pg, uint16_t *base, svint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1h_scatter_[u64]offset[_u64](svbool_t pg, uint16_t *base, svuint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1h_scatter_[s64]index[_u64](svbool_t pg, uint16_t *base, svint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1h_scatter_[u64]index[_u64](svbool_t pg, uint16_t *base, svuint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + + /// Scatter32BitNarrowing : Truncate to 32 bits and store, non-temporal + + /// + /// void svstnt1w_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1w_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + + /// Scatter32BitWithByteOffsetsNarrowing : Truncate to 32 bits and store, non-temporal + + /// + /// void svstnt1w_scatter_[s64]offset[_s64](svbool_t pg, int32_t *base, svint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1w_scatter_[u64]offset[_s64](svbool_t pg, int32_t *base, svuint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1w_scatter_[s64]index[_s64](svbool_t pg, int32_t *base, svint64_t indices, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1w_scatter_[u64]index[_s64](svbool_t pg, int32_t *base, svuint64_t indices, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1w_scatter_[s64]offset[_u64](svbool_t pg, uint32_t *base, svint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1w_scatter_[u64]offset[_u64](svbool_t pg, uint32_t *base, svuint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data) { throw new 
PlatformNotSupportedException(); } + + /// + /// void svstnt1w_scatter_[s64]index[_u64](svbool_t pg, uint32_t *base, svint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1w_scatter_[u64]index[_u64](svbool_t pg, uint32_t *base, svuint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + + /// Scatter8BitNarrowing : Truncate to 8 bits and store, non-temporal + + /// + /// void svstnt1b_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1b_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1b_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1b_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + + /// Scatter8BitWithByteOffsetsNarrowing : Truncate to 8 bits and store, non-temporal + + /// + /// void svstnt1b_scatter_[u32]offset[_s32](svbool_t pg, int8_t *base, svuint32_t offsets, svint32_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1b_scatter_[s64]offset[_s64](svbool_t pg, int8_t *base, svint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1b_scatter_[u64]offset[_s64](svbool_t pg, int8_t *base, svuint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1b_scatter_[u32]offset[_u32](svbool_t pg, uint8_t *base, svuint32_t offsets, svuint32_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1b_scatter_[s64]offset[_u64](svbool_t pg, uint8_t *base, svint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1b_scatter_[u64]offset[_u64](svbool_t pg, uint8_t *base, svuint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); }
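+ // NOTE (illustrative usage sketch, not part of the generated API): these
+ // stubs throw PlatformNotSupportedException, so this only shows the intended
+ // call shape. The listing has lost the generic arguments on Vector, so
+ // plausible ones are restored here; the Sve2 containing class and the
+ // all-true mask helper are assumptions, not confirmed by this file.
+ //
+ //     static unsafe void StoreLow16Bits(short* destination, Vector<uint> byteOffsets, Vector<int> data)
+ //     {
+ //         if (Sve2.IsSupported)
+ //         {
+ //             Vector<int> mask = Sve.CreateTrueMaskInt32(); // assumed predicate helper
+ //             // Each active lane stores the low 16 bits of data[i] at destination + byteOffsets[i].
+ //             Sve2.Scatter16BitWithByteOffsetsNarrowing(mask, destination, byteOffsets, data);
+ //         }
+ //     }
+ + + /// ScatterNonTemporal :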
Non-truncating store, non-temporal + + /// + /// void svstnt1_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[u32]offset[_s32](svbool_t pg, int32_t *base, svuint32_t offsets, svint32_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, int* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[s64]offset[_s64](svbool_t pg, int64_t *base, svint64_t offsets, svint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, long* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[u64]offset[_s64](svbool_t pg, int64_t *base, svuint64_t offsets, svint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, long* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[s64]index[_s64](svbool_t pg, int64_t *base, svint64_t indices, svint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, long* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[u64]index[_s64](svbool_t pg, int64_t *base, svuint64_t indices, svint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, long* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[u32]offset[_u32](svbool_t pg, uint32_t *base, svuint32_t offsets, svuint32_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, uint* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[s64]offset[_u64](svbool_t pg, uint64_t *base, svint64_t offsets, svuint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, ulong* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[u64]offset[_u64](svbool_t pg, uint64_t *base, svuint64_t offsets, svuint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, ulong* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[s64]index[_u64](svbool_t pg, uint64_t *base, svint64_t indices, svuint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, ulong* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[u64]index[_u64](svbool_t pg, uint64_t *base, svuint64_t indices, svuint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, ulong* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter[_u32base_f32](svbool_t pg, svuint32_t bases, svfloat32_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[u32]offset[_f32](svbool_t pg, float32_t *base, svuint32_t offsets, svfloat32_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, float* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[s64]offset[_f64](svbool_t pg, float64_t *base, svint64_t offsets, svfloat64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, double* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[s64]index[_f64](svbool_t pg, float64_t *base, svint64_t indices, svfloat64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, double* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter[_u64base_f64](svbool_t pg, svuint64_t bases, svfloat64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[u64]offset[_f64](svbool_t pg, float64_t *base, svuint64_t offsets, svfloat64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, double* address, Vector offsets, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svstnt1_scatter_[u64]index[_f64](svbool_t pg, float64_t *base, svuint64_t indices, svfloat64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, double* address, Vector indices, Vector data) { throw new PlatformNotSupportedException(); } + + + /// ShiftArithmeticRounded : Rounding shift left + + /// + /// svint8_t svrshl[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svrshl[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svrshl[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector ShiftArithmeticRounded(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svrshl[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svrshl[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svrshl[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector ShiftArithmeticRounded(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svrshl[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svrshl[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svrshl[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector ShiftArithmeticRounded(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svrshl[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svrshl[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svrshl[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector ShiftArithmeticRounded(Vector value, Vector count) { throw new PlatformNotSupportedException(); }
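+ // NOTE (illustrative sketch, not part of the generated API): svrshl shifts
+ // left for a non-negative count and right with rounding for a negative
+ // count. A scalar model of one 32-bit lane, assuming a count in [-31, 31]:
+ //
+ //     static int RoundingShift32(int value, int count)
+ //     {
+ //         if (count >= 0)
+ //             return value << count;                        // plain left shift
+ //         int right = -count;
+ //         long rounded = (long)value + (1L << (right - 1)); // add the rounding constant
+ //         return (int)(rounded >> right);                   // arithmetic right shift
+ //     }
+ + + ///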
ShiftArithmeticRoundedSaturate : Saturating rounding shift left + + /// + /// svint8_t svqrshl[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqrshl[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqrshl[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqrshl[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqrshl[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqrshl[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqrshl[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqrshl[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqrshl[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqrshl[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqrshl[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqrshl[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + + /// ShiftArithmeticSaturate : Saturating shift left + + /// + /// svint8_t svqshl[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqshl[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqshl[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector ShiftArithmeticSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqshl[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqshl[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqshl[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector ShiftArithmeticSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqshl[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqshl[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqshl[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector ShiftArithmeticSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqshl[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqshl[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqshl[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector ShiftArithmeticSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + + /// ShiftLeftAndInsert : Shift left and insert + + /// + /// svint8_t svsli[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svsli[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw 
new PlatformNotSupportedException(); } + + /// + /// svint32_t svsli[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svsli[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svsli[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svsli[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svsli[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svsli[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); } + + + /// ShiftLeftLogicalSaturate : Saturating shift left + + /// + /// svuint8_t svqshl[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svqshl[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svqshl[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) + /// + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqshl[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svqshl[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svqshl[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) + /// + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqshl[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svqshl[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svqshl[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) + /// + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svqshl[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svqshl[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svqshl[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) + /// + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + + /// ShiftLeftLogicalSaturateUnsigned : Saturating shift left unsigned + + /// + /// svuint8_t svqshlu[_n_s8]_m(svbool_t pg, svint8_t op1, uint64_t imm2) + /// svuint8_t svqshlu[_n_s8]_x(svbool_t pg, svint8_t op1, uint64_t imm2) + /// svuint8_t svqshlu[_n_s8]_z(svbool_t pg, svint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t 
svqshlu[_n_s16]_m(svbool_t pg, svint16_t op1, uint64_t imm2) + /// svuint16_t svqshlu[_n_s16]_x(svbool_t pg, svint16_t op1, uint64_t imm2) + /// svuint16_t svqshlu[_n_s16]_z(svbool_t pg, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqshlu[_n_s32]_m(svbool_t pg, svint32_t op1, uint64_t imm2) + /// svuint32_t svqshlu[_n_s32]_x(svbool_t pg, svint32_t op1, uint64_t imm2) + /// svuint32_t svqshlu[_n_s32]_z(svbool_t pg, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svqshlu[_n_s64]_m(svbool_t pg, svint64_t op1, uint64_t imm2) + /// svuint64_t svqshlu[_n_s64]_x(svbool_t pg, svint64_t op1, uint64_t imm2) + /// svuint64_t svqshlu[_n_s64]_z(svbool_t pg, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftLeftLogicalWideningEven : Shift left long (bottom) + + /// + /// svint16_t svshllb[_n_s16](svint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svshllb[_n_s32](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svshllb[_n_s64](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svshllb[_n_u16](svuint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svshllb[_n_u32](svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svshllb[_n_u64](svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftLeftLogicalWideningOdd : Shift left long (top) + + /// + /// svint16_t svshllt[_n_s16](svint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svshllt[_n_s32](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svshllt[_n_s64](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svshllt[_n_u16](svuint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) { throw new 
PlatformNotSupportedException(); } + + /// + /// svuint32_t svshllt[_n_u32](svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svshllt[_n_u64](svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftLogicalRounded : Rounding shift left + + /// + /// svuint8_t svrshl[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svrshl[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svrshl[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) + /// + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svrshl[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svrshl[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svrshl[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) + /// + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svrshl[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svrshl[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svrshl[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) + /// + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svrshl[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svrshl[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svrshl[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) + /// + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + + /// ShiftLogicalRoundedSaturate : Saturating rounding shift left + + /// + /// svuint8_t svqrshl[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svqrshl[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svqrshl[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) + /// + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqrshl[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svqrshl[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svqrshl[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) + /// + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqrshl[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svqrshl[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svqrshl[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) + /// + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svqrshl[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svqrshl[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svqrshl[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) + /// + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count) { throw new 
PlatformNotSupportedException(); } + + + /// ShiftRightAndInsert : Shift right and insert + + /// + /// svint8_t svsri[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svsri[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svsri[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svsri[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svsri[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svsri[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svsri[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svsri[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) { throw new PlatformNotSupportedException(); }
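+ // NOTE (illustrative sketch, not part of the generated API): ShiftRightAndInsert
+ // keeps the top `shift` bits of each lane of `left` and inserts `right`
+ // shifted logically right below them. A scalar model for a 32-bit lane,
+ // assuming 1 <= shift <= 31:
+ //
+ //     static uint ShiftRightAndInsert32(uint left, uint right, int shift)
+ //     {
+ //         uint kept = ~(uint.MaxValue >> shift);   // top bits the shifted value cannot reach
+ //         return (left & kept) | (right >> shift); // insert the shifted bits
+ //     }
+ + + /// ShiftRightArithmeticAdd : Shift right and accumulate + + /// + /// svint8_t svsra[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svsra[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svsra[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svsra[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightArithmeticNarrowingSaturateEven : Saturating shift right narrow (bottom) + + /// + /// svint8_t svqshrnb[_n_s16](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqshrnb[_n_s32](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector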
ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqshrnb[_n_s64](svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svqshrnb[_n_u16](svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqshrnb[_n_u32](svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqshrnb[_n_u64](svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightArithmeticNarrowingSaturateOdd : Saturating shift right narrow (top) + + /// + /// svint8_t svqshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svqshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightArithmeticNarrowingSaturateUnsignedEven : Saturating shift right unsigned narrow (bottom) + + /// + /// svuint8_t svqshrunb[_n_s16](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqshrunb[_n_s32](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqshrunb[_n_s64](svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector 
ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightArithmeticNarrowingSaturateUnsignedOdd : Saturating shift right unsigned narrow (top) + + /// + /// svuint8_t svqshrunt[_n_s16](svuint8_t even, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqshrunt[_n_s32](svuint16_t even, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqshrunt[_n_s64](svuint32_t even, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); }
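+ // NOTE (illustrative sketch, not part of the generated API): the unsigned
+ // narrowing forms shift a signed lane right, then clamp to the unsigned
+ // destination range. A scalar model for short -> byte, assuming ACLE
+ // semantics:
+ //
+ //     static byte ShiftRightNarrowUnsigned8(short value, int count)
+ //     {
+ //         int shifted = value >> count;                       // arithmetic shift keeps the sign
+ //         return (byte)Math.Clamp(shifted, 0, byte.MaxValue); // negatives -> 0, overflow -> 255
+ //     }
+ + + /// ShiftRightArithmeticRounded : Rounding shift right + + /// + /// svint8_t svrshr[_n_s8]_m(svbool_t pg, svint8_t op1, uint64_t imm2) + /// svint8_t svrshr[_n_s8]_x(svbool_t pg, svint8_t op1, uint64_t imm2) + /// svint8_t svrshr[_n_s8]_z(svbool_t pg, svint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svrshr[_n_s16]_m(svbool_t pg, svint16_t op1, uint64_t imm2) + /// svint16_t svrshr[_n_s16]_x(svbool_t pg, svint16_t op1, uint64_t imm2) + /// svint16_t svrshr[_n_s16]_z(svbool_t pg, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svrshr[_n_s32]_m(svbool_t pg, svint32_t op1, uint64_t imm2) + /// svint32_t svrshr[_n_s32]_x(svbool_t pg, svint32_t op1, uint64_t imm2) + /// svint32_t svrshr[_n_s32]_z(svbool_t pg, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svrshr[_n_s64]_m(svbool_t pg, svint64_t op1, uint64_t imm2) + /// svint64_t svrshr[_n_s64]_x(svbool_t pg, svint64_t op1, uint64_t imm2) + /// svint64_t svrshr[_n_s64]_z(svbool_t pg, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightArithmeticRoundedAdd : Rounding shift right and accumulate + + /// + /// svint8_t svrsra[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svrsra[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svrsra[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); }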
+ + /// + /// svint64_t svrsra[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightArithmeticRoundedNarrowingSaturateEven : Saturating rounding shift right narrow (bottom) + + /// + /// svint8_t svqrshrnb[_n_s16](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqrshrnb[_n_s32](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqrshrnb[_n_s64](svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightArithmeticRoundedNarrowingSaturateOdd : Saturating rounding shift right narrow (top) + + /// + /// svint8_t svqrshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqrshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqrshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven : Saturating rounding shift right unsigned narrow (bottom) + + /// + /// svuint8_t svqrshrunb[_n_s16](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqrshrunb[_n_s32](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqrshrunb[_n_s64](svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd : Saturating rounding shift right unsigned narrow (top) + + /// + /// svuint8_t svqrshrunt[_n_s16](svuint8_t even, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqrshrunt[_n_s32](svuint16_t even, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector even, 
Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqrshrunt[_n_s64](svuint32_t even, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightLogicalAdd : Shift right and accumulate + + /// + /// svuint8_t svsra[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svsra[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svsra[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svsra[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightLogicalNarrowingEven : Shift right narrow (bottom) + + /// + /// svint8_t svshrnb[_n_s16](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svshrnb[_n_s32](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svshrnb[_n_s64](svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svshrnb[_n_u16](svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svshrnb[_n_u32](svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svshrnb[_n_u64](svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightLogicalNarrowingOdd : Shift right narrow (top) + + /// + /// svint8_t svshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2) + 
/// + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightLogicalRounded : Rounding shift right + + /// + /// svuint8_t svrshr[_n_u8]_m(svbool_t pg, svuint8_t op1, uint64_t imm2) + /// svuint8_t svrshr[_n_u8]_x(svbool_t pg, svuint8_t op1, uint64_t imm2) + /// svuint8_t svrshr[_n_u8]_z(svbool_t pg, svuint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svrshr[_n_u16]_m(svbool_t pg, svuint16_t op1, uint64_t imm2) + /// svuint16_t svrshr[_n_u16]_x(svbool_t pg, svuint16_t op1, uint64_t imm2) + /// svuint16_t svrshr[_n_u16]_z(svbool_t pg, svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svrshr[_n_u32]_m(svbool_t pg, svuint32_t op1, uint64_t imm2) + /// svuint32_t svrshr[_n_u32]_x(svbool_t pg, svuint32_t op1, uint64_t imm2) + /// svuint32_t svrshr[_n_u32]_z(svbool_t pg, svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svrshr[_n_u64]_m(svbool_t pg, svuint64_t op1, uint64_t imm2) + /// svuint64_t svrshr[_n_u64]_x(svbool_t pg, svuint64_t op1, uint64_t imm2) + /// svuint64_t svrshr[_n_u64]_z(svbool_t pg, svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightLogicalRoundedAdd : Rounding shift right and accumulate + + /// + /// svuint8_t svrsra[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svrsra[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svrsra[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svrsra[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) + /// + public static unsafe Vector 
ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightLogicalRoundedNarrowingEven : Rounding shift right narrow (bottom) + + /// + /// svint8_t svrshrnb[_n_s16](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svrshrnb[_n_s32](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svrshrnb[_n_s64](svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svrshrnb[_n_u16](svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svrshrnb[_n_u32](svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svrshrnb[_n_u64](svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightLogicalRoundedNarrowingOdd : Rounding shift right narrow (top) + + /// + /// svint8_t svrshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svrshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svrshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svrshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svrshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svrshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightLogicalRoundedNarrowingSaturateEven : Saturating rounding shift right narrow (bottom) + + /// + /// svuint8_t svqrshrnb[_n_u16](svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector 
ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqrshrnb[_n_u32](svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqrshrnb[_n_u64](svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// ShiftRightLogicalRoundedNarrowingSaturateOdd : Saturating rounding shift right narrow (top) + + /// + /// svuint8_t svqrshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqrshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqrshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + + /// SubtractHighNarrowingLower : Subtract narrow high part (bottom) + + /// + /// svint8_t svsubhnb[_s16](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svsubhnb[_s32](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svsubhnb[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svsubhnb[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svsubhnb[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svsubhnb[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// SubtractHighNarrowingUpper : Subtract narrow high part (top) + + /// + /// svint8_t svsubhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svsubhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svsubhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector
SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svsubhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svsubhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svsubhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// SubtractSaturate : Saturating subtract + + /// + /// svint8_t svqsub[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqsub[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqsub[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqsub[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqsub[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqsub[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqsub[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqsub[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqsub[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqsub[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqsub[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqsub[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svqsub[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svqsub[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svqsub[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqsub[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svqsub[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svqsub[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqsub[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svqsub[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svqsub[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); }
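+
+ // Illustrative sketch (hypothetical usage, not generator output): SubtractSaturate clamps each
+ // lane to the element type's range instead of wrapping, so for unsigned bytes 10 - 20 yields 0
+ // rather than 246, e.g. var diff = Sve2.SubtractSaturate(left, right);
+
+ /// + /// svuint64_t svqsub[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svqsub[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t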
svqsub[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// SubtractSaturateReversed : Saturating subtract reversed + + /// + /// svint8_t svqsubr[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqsubr[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqsubr[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svqsubr[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqsubr[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqsubr[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svqsubr[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqsubr[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqsubr[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svqsubr[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqsubr[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqsubr[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svqsubr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svqsubr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svqsubr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svqsubr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svqsubr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svqsubr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svqsubr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svqsubr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svqsubr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svqsubr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svqsubr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svqsubr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// SubtractWideLower : Subtract wide (bottom) + + /// + /// svint16_t svsubwb[_s16](svint16_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svsubwb[_s32](svint32_t op1, svint16_t op2) + /// + public static unsafe 
Vector SubtractWideLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svsubwb[_s64](svint64_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svsubwb[_u16](svuint16_t op1, svuint8_t op2) + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svsubwb[_u32](svuint32_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svsubwb[_u64](svuint64_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// SubtractWideUpper : Subtract wide (top) + + /// + /// svint16_t svsubwt[_s16](svint16_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svsubwt[_s32](svint32_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svsubwt[_s64](svint64_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svsubwt[_u16](svuint16_t op1, svuint8_t op2) + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svsubwt[_u32](svuint32_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svsubwt[_u64](svuint64_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// SubtractWideningLower : Subtract long (bottom) + + /// + /// svint16_t svsublb[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svsublb[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svsublb[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svsublb[_u16](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svsublb[_u32](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svsublb[_u64](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// SubtractWideningLowerUpper : Subtract long (bottom - top) + + /// + /// svint16_t svsublbt[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe 
Vector SubtractWideningLowerUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svsublbt[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractWideningLowerUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svsublbt[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractWideningLowerUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// SubtractWideningUpper : Subtract long (top) + + /// + /// svint16_t svsublt[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svsublt[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svsublt[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svsublt[_u16](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svsublt[_u32](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svsublt[_u64](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// SubtractWideningUpperLower : Subtract long (top - bottom) + + /// + /// svint16_t svsubltb[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractWideningUpperLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svsubltb[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractWideningUpperLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svsubltb[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractWideningUpperLower(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// SubtractWithBorrowWideningLower : Subtract with borrow long (bottom) + + /// + /// svuint32_t svsbclb[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector SubtractWithBorrowWideningLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svsbclb[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector SubtractWithBorrowWideningLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + + /// SubtractWithBorrowWideningUpper : Subtract with borrow long (top) + + /// + /// svuint32_t svsbclt[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector SubtractWithBorrowWideningUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svsbclt[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector SubtractWithBorrowWideningUpper(Vector op1, Vector op2, Vector op3) { throw new 
PlatformNotSupportedException(); } + + + /// UpConvertWideningUpper : Up convert long (top) + + /// + /// svfloat64_t svcvtlt_f64[_f32]_m(svfloat64_t inactive, svbool_t pg, svfloat32_t op) + /// svfloat64_t svcvtlt_f64[_f32]_x(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector UpConvertWideningUpper(Vector value) { throw new PlatformNotSupportedException(); } + + + /// VectorTableLookup : Table lookup in two-vector table + + /// + /// svint8_t svtbl2[_s8](svint8x2_t data, svuint8_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svtbl2[_s16](svint16x2_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svtbl2[_s32](svint32x2_t data, svuint32_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svtbl2[_s64](svint64x2_t data, svuint64_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svtbl2[_u8](svuint8x2_t data, svuint8_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svtbl2[_u16](svuint16x2_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svtbl2[_u32](svuint32x2_t data, svuint32_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svtbl2[_u64](svuint64x2_t data, svuint64_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svtbl2[_f32](svfloat32x2_t data, svuint32_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svtbl2[_f64](svfloat64x2_t data, svuint64_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2), Vector indices) { throw new PlatformNotSupportedException(); } + + + /// VectorTableLookupExtension : Table lookup in single-vector table (merging) + + /// + /// svint8_t svtbx[_s8](svint8_t fallback, svint8_t data, svuint8_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svtbx[_s16](svint16_t fallback, svint16_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svtbx[_s32](svint32_t fallback, svint32_t data, svuint32_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svtbx[_s64](svint64_t fallback, 
svint64_t data, svuint64_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svtbx[_u8](svuint8_t fallback, svuint8_t data, svuint8_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svtbx[_u16](svuint16_t fallback, svuint16_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svtbx[_u32](svuint32_t fallback, svuint32_t data, svuint32_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svtbx[_u64](svuint64_t fallback, svuint64_t data, svuint64_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svtbx[_f32](svfloat32_t fallback, svfloat32_t data, svuint32_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svtbx[_f64](svfloat64_t fallback, svfloat64_t data, svuint64_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) { throw new PlatformNotSupportedException(); } + + + /// Xor : Bitwise exclusive OR of three vectors + + /// + /// svint8_t sveor3[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t sveor3[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t sveor3[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t sveor3[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t sveor3[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t sveor3[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t sveor3[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t sveor3[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) { throw new PlatformNotSupportedException(); } + + + /// XorRotateRight : Bitwise exclusive OR and rotate right + + /// + /// 
svint8_t svxar[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svxar[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svxar[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svxar[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svxar[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svxar[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svxar[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svxar[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) { throw new PlatformNotSupportedException(); } + + } +} + diff --git a/sve_api/out_cs_api/Sve2.System.Runtime.Intrinsics.cs b/sve_api/out_cs_api/Sve2.System.Runtime.Intrinsics.cs new file mode 100644 index 0000000000000..ba801296641ee --- /dev/null +++ b/sve_api/out_cs_api/Sve2.System.Runtime.Intrinsics.cs @@ -0,0 +1,935 @@ + public static System.Numerics.Vector AbsoluteDifferenceAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static 
System.Numerics.Vector AbsoluteDifferenceAddWideningLower(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAddWideningLower(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAddWideningLower(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAddWideningLower(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAddWideningLower(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAddWideningLower(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAddWideningUpper(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAddWideningUpper(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAddWideningUpper(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAddWideningUpper(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAddWideningUpper(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceAddWideningUpper(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceWideningUpper(System.Numerics.Vector 
left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifferenceWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddCarryWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector AddCarryWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector AddCarryWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector AddCarryWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector AddHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; }
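+ // Illustrative call-site sketch (hypothetical, not part of the generated surface): these
+ // reference-assembly stubs ({ throw null; }) only define the API shape, so callers guard on
+ // hardware support first, e.g. if (Sve2.IsSupported) { sums = Sve2.AddPairwise(left, right); }
+ public static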
System.Numerics.Vector AddPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwiseWidening(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwiseWidening(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwiseWidening(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwiseWidening(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwiseWidening(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddPairwiseWidening(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddRotateComplex(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector AddRotateComplex(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector AddRotateComplex(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector AddRotateComplex(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector AddRotateComplex(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector AddRotateComplex(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector AddRotateComplex(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector AddRotateComplex(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector AddSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector 
AddSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddSaturateWithSignedAddend(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddSaturateWithSignedAddend(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddSaturateWithSignedAddend(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddSaturateWithSignedAddend(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddSaturateWithUnsignedAddend(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddSaturateWithUnsignedAddend(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddSaturateWithUnsignedAddend(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddSaturateWithUnsignedAddend(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public 
static System.Numerics.Vector AddWideningLowerUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideningLowerUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideningLowerUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseClearXor(System.Numerics.Vector xor, System.Numerics.Vector value, System.Numerics.Vector mask) { throw null; } + public static System.Numerics.Vector BitwiseClearXor(System.Numerics.Vector xor, System.Numerics.Vector value, System.Numerics.Vector mask) { throw null; } + public static System.Numerics.Vector BitwiseClearXor(System.Numerics.Vector xor, System.Numerics.Vector value, System.Numerics.Vector mask) { throw null; } + public static System.Numerics.Vector BitwiseClearXor(System.Numerics.Vector xor, System.Numerics.Vector value, System.Numerics.Vector mask) { throw null; } + public static System.Numerics.Vector BitwiseClearXor(System.Numerics.Vector xor, System.Numerics.Vector value, System.Numerics.Vector mask) { throw null; } + public static System.Numerics.Vector BitwiseClearXor(System.Numerics.Vector xor, System.Numerics.Vector value, System.Numerics.Vector mask) { throw null; } + public static System.Numerics.Vector BitwiseClearXor(System.Numerics.Vector xor, System.Numerics.Vector value, System.Numerics.Vector mask) { throw null; } + public static System.Numerics.Vector BitwiseClearXor(System.Numerics.Vector xor, System.Numerics.Vector value, System.Numerics.Vector mask) { throw null; } + public static System.Numerics.Vector BitwiseSelect(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelect(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelect(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelect(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelect(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelect(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelect(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw 
null; } + public static System.Numerics.Vector BitwiseSelect(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectLeftInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectLeftInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectLeftInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectLeftInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectLeftInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectLeftInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectLeftInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectLeftInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectRightInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectRightInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectRightInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectRightInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectRightInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectRightInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectRightInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseSelectRightInverted(System.Numerics.Vector select, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector CountMatchingElements(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector CountMatchingElements(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector CountMatchingElements(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; }
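// Illustrative reference semantics for the bitwise-select family above: a minimal
// sketch assuming these correspond to the SVE2 BSL/BSL1N/BSL2N instructions, with
// ulong standing in for any lane type. The *Ref names are hypothetical helpers,
// not part of the generated surface.
private static ulong BitwiseSelectRef(ulong select, ulong left, ulong right)
    => (select & left) | (~select & right);   // take left bits where select is set, right elsewhere
private static ulong BitwiseSelectLeftInvertedRef(ulong select, ulong left, ulong right)
    => (select & ~left) | (~select & right);  // assumed: left is inverted before the select
private static ulong BitwiseSelectRightInvertedRef(ulong select, ulong left, ulong right)
    => (select & left) | (~select & ~right);  // assumed: right is inverted before the select
+ public static System.Numerics.Vector CountMatchingElements(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right)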
{ throw null; } + public static System.Numerics.Vector CountMatchingElementsIn128BitSegments(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector CountMatchingElementsIn128BitSegments(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(int left, int right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(long left, long right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(uint left, uint right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(ulong left, ulong right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(int left, int right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(long left, long right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(uint left, uint right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(ulong left, ulong right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(int left, int right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(long left, long right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(uint left, uint right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(ulong left, ulong right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(int left, int right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(long left, long right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(uint left, uint right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanMask(ulong left, ulong right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(int left, int right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(long left, long right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(int left, int right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(long left, long right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(int left, int right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(long left, long right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(int left, int right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(long left, long right) { 
throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) { throw null; } + public static System.Numerics.Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileReadAfterWriteMask(sbyte* left, sbyte* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileReadAfterWriteMask(short* left, short* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileReadAfterWriteMask(int* left, int* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileReadAfterWriteMask(long* left, long* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileReadAfterWriteMask(byte* left, byte* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileReadAfterWriteMask(ushort* left, ushort* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileReadAfterWriteMask(uint* left, uint* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileReadAfterWriteMask(ulong* left, ulong* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileReadAfterWriteMask(float* left, float* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileReadAfterWriteMask(double* left, double* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileWriteAfterReadMask(sbyte* left, sbyte* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileWriteAfterReadMask(short* left, short* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileWriteAfterReadMask(int* left, int* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileWriteAfterReadMask(long* left, long* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileWriteAfterReadMask(byte* left, byte* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileWriteAfterReadMask(ushort* left, ushort* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileWriteAfterReadMask(uint* left, uint* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileWriteAfterReadMask(ulong* left, ulong* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileWriteAfterReadMask(float* left, float* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileWriteAfterReadMask(double* left, double* right) { throw null; } + public static System.Numerics.Vector DotProductComplex(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector DotProductComplex(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector DotProductComplex(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector DotProductComplex(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector DownConvertNarrowingUpper(System.Numerics.Vector value) { throw null; }
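// A hedged reference model for the while-mask builders above. The per-lane rule for
// the compare forms is an assumption based on the decrementing SVE2 WHILEGE/WHILEGT
// instructions they appear to correspond to; WhileGreaterThanOrEqualMaskRef is a
// hypothetical helper, and laneCount stands in for the hardware vector length.
private static bool[] WhileGreaterThanOrEqualMaskRef(long left, long right, int laneCount)
{
    var mask = new bool[laneCount];
    for (int i = 0; i < laneCount; i++)
        mask[i] = (left - i) >= right;   // assumed: the first operand decrements lane by lane
    return mask;
}
// The pointer forms (CreateWhileReadAfterWriteMask / CreateWhileWriteAfterReadMask,
// presumably WHILERW/WHILEWR) are expected to yield an all-true mask when the two
// pointers cannot alias within one vector, and otherwise to limit the active lanes
// to the pointer distance, so a vectorised loop over possibly-overlapping buffers
// stays correct without a scalar fallback.
+ public static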
System.Numerics.Vector DownConvertRoundingOdd(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector DownConvertRoundingOddUpper(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector GatherVectorByteZeroExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorByteZeroExtendNonTemporal(System.Numerics.Vector mask, byte* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorByteZeroExtendNonTemporal(System.Numerics.Vector mask, byte* address, System.Numerics.Vector offsets) { throw null; } + public static System.Numerics.Vector GatherVectorByteZeroExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorByteZeroExtendNonTemporal(System.Numerics.Vector mask, byte* address, System.Numerics.Vector offsets) { throw null; } + public static System.Numerics.Vector GatherVectorByteZeroExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorByteZeroExtendNonTemporal(System.Numerics.Vector mask, byte* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorByteZeroExtendNonTemporal(System.Numerics.Vector mask, byte* address, System.Numerics.Vector offsets) { throw null; } + public static System.Numerics.Vector GatherVectorByteZeroExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorByteZeroExtendNonTemporal(System.Numerics.Vector mask, byte* address, System.Numerics.Vector offsets) { throw null; } + public static System.Numerics.Vector GatherVectorInt16SignExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16SignExtendNonTemporal(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt16SignExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16SignExtendNonTemporal(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt16SignExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16SignExtendNonTemporal(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt16SignExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16SignExtendNonTemporal(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(System.Numerics.Vector mask, short* 
address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt32SignExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt32SignExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt32SignExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorInt32SignExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32SignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector 
GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets) { throw null; } + public static System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, long* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, long* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, long* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, long* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, ulong* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, ulong* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, ulong* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, ulong* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, float* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, double* address, System.Numerics.Vector offsets) { 
throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, double* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, double* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorNonTemporal(System.Numerics.Vector mask, double* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorSByteSignExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtendNonTemporal(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtendNonTemporal(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets) { throw null; } + public static System.Numerics.Vector GatherVectorSByteSignExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtendNonTemporal(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets) { throw null; } + public static System.Numerics.Vector GatherVectorSByteSignExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtendNonTemporal(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtendNonTemporal(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets) { throw null; } + public static System.Numerics.Vector GatherVectorSByteSignExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorSByteSignExtendNonTemporal(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets) { throw null; } + public static System.Numerics.Vector 
GatherVectorUInt16ZeroExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtendNonTemporal(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt16ZeroExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtendNonTemporal(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt16ZeroExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtendNonTemporal(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt16ZeroExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt16ZeroExtendNonTemporal(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt32ZeroExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + 
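// Reference semantics sketch for the non-temporal zero-extending gathers above: each
// active lane loads a narrow element and zero-extends it into a wide lane, inactive
// lanes produce zero, and "NonTemporal" is only a cache-usage hint that does not
// change the value loaded. Arrays stand in for the pointer overloads, and the *Ref
// name is a hypothetical helper.
private static ulong[] GatherUInt16ZeroExtendRef(bool[] mask, ushort[] memory, int[] indices)
{
    var result = new ulong[mask.Length];
    for (int i = 0; i < mask.Length; i++)
        result[i] = mask[i] ? memory[indices[i]] : 0UL;   // ushort widens to ulong with zero-extension
    return result;
}
// The WithByteOffsets overloads are assumed to take raw byte offsets, so lane i loads
// from address + offsets[i], while the index-taking overloads scale by the element size.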
public static System.Numerics.Vector GatherVectorUInt32ZeroExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt32ZeroExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector GatherVectorUInt32ZeroExtendNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses) { throw null; } + public static unsafe System.Numerics.Vector GatherVectorUInt32ZeroExtendNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector HalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtract(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtract(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtract(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtract(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtract(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtract(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtract(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtract(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtractReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public 
static System.Numerics.Vector HalvingSubtractReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtractReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtractReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtractReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtractReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtractReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector HalvingSubtractReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorLowerUpper(System.Numerics.Vector odd, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorLowerUpper(System.Numerics.Vector odd, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorLowerUpper(System.Numerics.Vector odd, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorLowerUpper(System.Numerics.Vector odd, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorLowerUpper(System.Numerics.Vector odd, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorLowerUpper(System.Numerics.Vector odd, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorLowerUpper(System.Numerics.Vector odd, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorLowerUpper(System.Numerics.Vector odd, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorUpperLower(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorUpperLower(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorUpperLower(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorUpperLower(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorUpperLower(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorUpperLower(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleavingXorUpperLower(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; }
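// Reference semantics for the halving arithmetic in this group: a sketch assuming the
// SVE2 SHADD/SHSUB/SHSUBR behaviour, where the sum or difference is formed in a wider
// type and then halved, so the intermediate cannot overflow. short lanes chosen for
// illustration; the *Ref helpers are hypothetical.
private static short HalvingAddRef(short a, short b) => (short)((a + b) >> 1);        // a + b widens to int first
private static short HalvingSubtractRef(short a, short b) => (short)((a - b) >> 1);
private static short HalvingSubtractReversedRef(short a, short b) => (short)((b - a) >> 1);  // assumed: operands swapped
+ public static System.Numerics.Vector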
InterleavingXorUpperLower(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Log2(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Log2(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Match(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Match(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Match(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Match(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxNumberPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxNumberPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinNumberPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinNumberPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinPairwise(System.Numerics.Vector left, System.Numerics.Vector 
right) { throw null; } + public static System.Numerics.Vector MinPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MoveWideningLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MoveWideningLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MoveWideningLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MoveWideningLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MoveWideningLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MoveWideningLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MoveWideningUpper(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MoveWideningUpper(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MoveWideningUpper(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MoveWideningUpper(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MoveWideningUpper(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MoveWideningUpper(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MultiplyAddBySelectedScalar(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplyAddBySelectedScalar(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplyAddBySelectedScalar(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplyAddBySelectedScalar(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplyAddBySelectedScalar(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplyAddBySelectedScalar(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplex(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplex(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplex(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; }
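// Sketch of the by-selected-scalar forms above: the lane of right picked by the
// constant rightIndex multiplies every lane of left, accumulating into addend.
// int lanes chosen for illustration; MultiplyAddBySelectedScalarRef is a
// hypothetical helper, not the actual implementation.
private static int[] MultiplyAddBySelectedScalarRef(int[] addend, int[] left, int[] right, byte rightIndex)
{
    var result = new int[addend.Length];
    for (int i = 0; i < result.Length; i++)
        result[i] = addend[i] + left[i] * right[rightIndex];   // one scalar for all lanes
    return result;
}
+ public static System.Numerics.Vector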
MultiplyAddRotateComplex(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplex(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplex(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplex(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplex(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplexBySelectedScalar(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplexBySelectedScalar(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplexBySelectedScalar(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplexBySelectedScalar(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static 
System.Numerics.Vector MultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyBySelectedScalar(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplyBySelectedScalar(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplyBySelectedScalar(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplyBySelectedScalar(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplyBySelectedScalar(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplyBySelectedScalar(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplySubtractBySelectedScalar(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplySubtractBySelectedScalar(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; }
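// Hedged sketch of the widening multiplies above, assuming the SVE2 bottom/top
// convention (SMULLB/SMULLT): the "Lower" form is assumed to read the even-numbered
// narrow lanes and "Upper" the odd-numbered ones, each product landing in a
// double-width lane. MultiplyWideningLowerRef is a hypothetical helper.
private static long[] MultiplyWideningLowerRef(int[] left, int[] right)
{
    var result = new long[left.Length / 2];
    for (int i = 0; i < result.Length; i++)
        result[i] = (long)left[2 * i] * right[2 * i];   // even (bottom) lanes; 2*i+1 for Upper
    return result;
}
+ public static System.Numerics.Vector MultiplySubtractBySelectedScalar(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) {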
throw null; } + public static System.Numerics.Vector MultiplySubtractBySelectedScalar(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplySubtractBySelectedScalar(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplySubtractBySelectedScalar(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector 
MultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector 
NoMatch(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector NoMatch(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector NoMatch(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector NoMatch(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector PolynomialMultiply(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector PolynomialMultiplyWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector PolynomialMultiplyWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector PolynomialMultiplyWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector PolynomialMultiplyWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector PolynomialMultiplyWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector PolynomialMultiplyWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector PolynomialMultiplyWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector PolynomialMultiplyWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ReciprocalEstimate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReciprocalSqrtEstimate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundingAddHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingAddHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingAddHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingAddHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingAddHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingAddHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingAddHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingAddHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingAddHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; }
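// Reference semantics for the rounding add-high-narrowing operations above (assumed
// RADDHNB/RADDHNT): add two wide lanes, round, and keep only the high half at half
// the element width. ushort to byte chosen for illustration; the helper is hypothetical.
private static byte RoundingAddHighNarrowingRef(ushort a, ushort b)
    => (byte)((ushort)(a + b + 0x80) >> 8);   // +0x80 rounds the discarded low byte
+ public static System.Numerics.Vector RoundingAddHighNarrowingUpper(System.Numerics.Vector even,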
System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingAddHighNarowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingAddHighNarowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingHalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingHalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingHalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingHalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingHalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingHalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingHalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingHalvingAdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingSubtractHighNarowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingSubtractHighNarowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingSubtractHighNarowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingSubtractHighNarowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingSubtractHighNarowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingSubtractHighNarowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingSubtractHighNarowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingSubtractHighNarowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingSubtractHighNarowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingSubtractHighNarowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingSubtractHighNarowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector RoundingSubtractHighNarowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SaturatingAbs(System.Numerics.Vector value) { throw null; } + public static 
System.Numerics.Vector SaturatingAbs(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingAbs(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingAbs(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingComplexAddRotate(System.Numerics.Vector op1, System.Numerics.Vector op2, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector SaturatingComplexAddRotate(System.Numerics.Vector op1, System.Numerics.Vector op2, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector SaturatingComplexAddRotate(System.Numerics.Vector op1, System.Numerics.Vector op2, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector SaturatingComplexAddRotate(System.Numerics.Vector op1, System.Numerics.Vector op2, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyAddWideningLowerUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyAddWideningLowerUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyAddWideningLowerUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyHigh(System.Numerics.Vector left, System.Numerics.Vector right) { throw 
null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyHigh(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyHigh(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyHigh(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyWideningLower(System.Numerics.Vector left, 
System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SaturatingDoublingMultiplyWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingExtractNarrowingLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingExtractNarrowingLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingExtractNarrowingLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingExtractNarrowingLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingExtractNarrowingLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingExtractNarrowingLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingExtractNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector op) { throw null; } + public static System.Numerics.Vector SaturatingExtractNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector op) { throw null; } + public static System.Numerics.Vector SaturatingExtractNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector op) { throw null; } + public static System.Numerics.Vector SaturatingExtractNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector op) { throw null; } + public static System.Numerics.Vector SaturatingExtractNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector op) { throw null; } + public static System.Numerics.Vector SaturatingExtractNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector op) { throw null; } + public static System.Numerics.Vector SaturatingExtractUnsignedNarrowingLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingExtractUnsignedNarrowingLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingExtractUnsignedNarrowingLower(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingExtractUnsignedNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector 
op) { throw null; } + public static System.Numerics.Vector SaturatingExtractUnsignedNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector op) { throw null; } + public static System.Numerics.Vector SaturatingExtractUnsignedNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector op) { throw null; } + public static System.Numerics.Vector SaturatingNegate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingNegate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingNegate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingNegate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplyAddHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplyAddHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplyAddHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplyAddHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplyAddHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplyAddHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplyAddHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplyHigh(System.Numerics.Vector 
left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplyHigh(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplyHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplyHigh(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplyHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplyHigh(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplyHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplySubtractHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplySubtractHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplySubtractHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplySubtractHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplySubtractHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplySubtractHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SaturatingRoundingDoublingMultiplySubtractHigh(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static void Scatter16BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static void Scatter16BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static void Scatter16BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static void Scatter16BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, short* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe 
void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, short* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, ushort* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static void Scatter32BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static void Scatter32BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, int* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, uint* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static void Scatter8BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static void Scatter8BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static void Scatter8BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static void 
Scatter8BitNarrowing(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, sbyte* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, byte* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, byte* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(System.Numerics.Vector mask, byte* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static void ScatterNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, int* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static void ScatterNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, long* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, long* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, long* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, long* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static void ScatterNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, uint* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static void ScatterNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, ulong* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, ulong* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, ulong* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, ulong* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static void ScatterNonTemporal(System.Numerics.Vector
mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, float* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, double* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, double* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static void ScatterNonTemporal(System.Numerics.Vector mask, System.Numerics.Vector addresses, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, double* address, System.Numerics.Vector offsets, System.Numerics.Vector data) { throw null; } + public static unsafe void ScatterNonTemporal(System.Numerics.Vector mask, double* address, System.Numerics.Vector indices, System.Numerics.Vector data) { throw null; } + public static System.Numerics.Vector ShiftArithmeticRounded(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftArithmeticRounded(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftArithmeticRounded(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftArithmeticRounded(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftArithmeticRoundedSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftArithmeticRoundedSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftArithmeticRoundedSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftArithmeticRoundedSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftArithmeticSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftArithmeticSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftArithmeticSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftArithmeticSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftLeftAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftLeftAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftLeftAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftLeftAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftLeftAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected]
byte shift) { throw null; } + public static System.Numerics.Vector ShiftLeftAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftLeftAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftLeftAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalSaturateUnsigned(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalSaturateUnsigned(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalSaturateUnsigned(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalSaturateUnsigned(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalWideningEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalWideningEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalWideningEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalWideningEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalWideningEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalWideningEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalWideningOdd(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalWideningOdd(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalWideningOdd(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalWideningOdd(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalWideningOdd(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLeftLogicalWideningOdd(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftLogicalRounded(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public 
static System.Numerics.Vector ShiftLogicalRounded(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftLogicalRounded(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftLogicalRounded(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftLogicalRoundedSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftLogicalRoundedSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftLogicalRoundedSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftLogicalRoundedSaturate(System.Numerics.Vector value, System.Numerics.Vector count) { throw null; } + public static System.Numerics.Vector ShiftRightAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftRightAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftRightAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftRightAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftRightAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftRightAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftRightAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftRightAndInsert(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte shift) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateEven(System.Numerics.Vector 
value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateUnsignedEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateUnsignedEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateUnsignedEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRounded(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRounded(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRounded(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRounded(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector 
ShiftRightArithmeticRoundedAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedNarrowingSaturateOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedNarrowingSaturateOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedNarrowingSaturateOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalNarrowingEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalNarrowingEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector 
ShiftRightLogicalNarrowingEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalNarrowingEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalNarrowingEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalNarrowingEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalNarrowingOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalNarrowingOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalNarrowingOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalNarrowingOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalNarrowingOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalNarrowingOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRounded(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRounded(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRounded(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRounded(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedAdd(System.Numerics.Vector addend, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector 
ShiftRightLogicalRoundedNarrowingEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingSaturateEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingSaturateEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingSaturateEven(System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(System.Numerics.Vector even, System.Numerics.Vector value, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector SubtractHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractHighNarrowingLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left,
System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractHighNarrowingUpper(System.Numerics.Vector even, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturate(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturateReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturateReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturateReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturateReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturateReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturateReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturateReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractSaturateReversed(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public
static System.Numerics.Vector SubtractWideLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningLowerUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningLowerUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningLowerUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningUpperLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningUpperLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWideningUpperLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector SubtractWithBorrowWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { 
throw null; } + public static System.Numerics.Vector SubtractWithBorrowWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SubtractWithBorrowWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector SubtractWithBorrowWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector UpConvertWideningUpper(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector VectorTableLookup((System.Numerics.Vector data1, System.Numerics.Vector data2), System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookup((System.Numerics.Vector data1, System.Numerics.Vector data2), System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookup((System.Numerics.Vector data1, System.Numerics.Vector data2), System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookup((System.Numerics.Vector data1, System.Numerics.Vector data2), System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookup((System.Numerics.Vector data1, System.Numerics.Vector data2), System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookup((System.Numerics.Vector data1, System.Numerics.Vector data2), System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookup((System.Numerics.Vector data1, System.Numerics.Vector data2), System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookup((System.Numerics.Vector data1, System.Numerics.Vector data2), System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookup((System.Numerics.Vector data1, System.Numerics.Vector data2), System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookup((System.Numerics.Vector data1, System.Numerics.Vector data2), System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookupExtension(System.Numerics.Vector fallback, System.Numerics.Vector data, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookupExtension(System.Numerics.Vector fallback, System.Numerics.Vector data, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookupExtension(System.Numerics.Vector fallback, System.Numerics.Vector data, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookupExtension(System.Numerics.Vector fallback, System.Numerics.Vector data, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookupExtension(System.Numerics.Vector fallback, System.Numerics.Vector data, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookupExtension(System.Numerics.Vector fallback, System.Numerics.Vector data, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookupExtension(System.Numerics.Vector fallback, System.Numerics.Vector data, System.Numerics.Vector indices) { throw null; } + public static 
System.Numerics.Vector VectorTableLookupExtension(System.Numerics.Vector fallback, System.Numerics.Vector data, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookupExtension(System.Numerics.Vector fallback, System.Numerics.Vector data, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookupExtension(System.Numerics.Vector fallback, System.Numerics.Vector data, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector Xor(System.Numerics.Vector value1, System.Numerics.Vector value2, System.Numerics.Vector value3) { throw null; } + public static System.Numerics.Vector Xor(System.Numerics.Vector value1, System.Numerics.Vector value2, System.Numerics.Vector value3) { throw null; } + public static System.Numerics.Vector Xor(System.Numerics.Vector value1, System.Numerics.Vector value2, System.Numerics.Vector value3) { throw null; } + public static System.Numerics.Vector Xor(System.Numerics.Vector value1, System.Numerics.Vector value2, System.Numerics.Vector value3) { throw null; } + public static System.Numerics.Vector Xor(System.Numerics.Vector value1, System.Numerics.Vector value2, System.Numerics.Vector value3) { throw null; } + public static System.Numerics.Vector Xor(System.Numerics.Vector value1, System.Numerics.Vector value2, System.Numerics.Vector value3) { throw null; } + public static System.Numerics.Vector Xor(System.Numerics.Vector value1, System.Numerics.Vector value2, System.Numerics.Vector value3) { throw null; } + public static System.Numerics.Vector Xor(System.Numerics.Vector value1, System.Numerics.Vector value2, System.Numerics.Vector value3) { throw null; } + public static System.Numerics.Vector XorRotateRight(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector XorRotateRight(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector XorRotateRight(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector XorRotateRight(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector XorRotateRight(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector XorRotateRight(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector XorRotateRight(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte count) { throw null; } + public static System.Numerics.Vector XorRotateRight(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte count) { throw null; } + diff --git a/sve_api/out_cs_api/Sve2.cs b/sve_api/out_cs_api/Sve2.cs new file mode 100644 index 0000000000000..1d280a4b8a9ef --- /dev/null +++ b/sve_api/out_cs_api/Sve2.cs @@ -0,0 +1,5442 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
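+// Editor's note on the pattern used throughout this file: each method body
+// below is a self-recursive call (for example "=> AbsoluteDifferenceAdd(addend, left, right)"),
+// and IsSupported likewise reads itself. This mirrors the other hardware
+// intrinsic classes under System.Runtime.Intrinsics: the JIT replaces a
+// recognized call with the underlying SVE2 instruction, so the recursive
+// body never runs on hardware where the intrinsic is available.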
+ +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class Sve2 : AdvSimd + { + internal Sve2() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// AbsoluteDifferenceAdd : Absolute difference and accumulate + + /// + /// svint8_t svaba[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + /// + /// svint16_t svaba[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + /// + /// svint32_t svaba[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + /// + /// svint64_t svaba[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + /// + /// svuint8_t svaba[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + /// + /// svuint16_t svaba[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + /// + /// svuint32_t svaba[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + /// + /// svuint64_t svaba[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + + /// AbsoluteDifferenceAddWideningLower : Absolute difference and accumulate long (bottom) + + /// + /// svint16_t svabalb[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningLower(addend, left, right); + + /// + /// svint32_t svabalb[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningLower(addend, left, right); + + /// + /// svint64_t svabalb[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningLower(addend, left, right); + + /// + /// svuint16_t svabalb[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, 
Vector right) => AbsoluteDifferenceAddWideningLower(addend, left, right); + + /// + /// svuint32_t svabalb[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningLower(addend, left, right); + + /// + /// svuint64_t svabalb[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningLower(addend, left, right); + + + /// AbsoluteDifferenceAddWideningUpper : Absolute difference and accumulate long (top) + + /// + /// svint16_t svabalt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningUpper(addend, left, right); + + /// + /// svint32_t svabalt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningUpper(addend, left, right); + + /// + /// svint64_t svabalt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningUpper(addend, left, right); + + /// + /// svuint16_t svabalt[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningUpper(addend, left, right); + + /// + /// svuint32_t svabalt[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningUpper(addend, left, right); + + /// + /// svuint64_t svabalt[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningUpper(addend, left, right); + + + /// AbsoluteDifferenceWideningLower : Absolute difference long (bottom) + + /// + /// svint16_t svabdlb[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector right) => AbsoluteDifferenceWideningLower(left, right); + + /// + /// svint32_t svabdlb[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector right) => AbsoluteDifferenceWideningLower(left, right); + + /// + /// svint64_t svabdlb[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector right) => AbsoluteDifferenceWideningLower(left, right); + + /// + /// svuint16_t svabdlb[_u16](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector right) => AbsoluteDifferenceWideningLower(left, right); + + /// + /// svuint32_t svabdlb[_u32](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector right) => AbsoluteDifferenceWideningLower(left, right); + + /// + /// svuint64_t svabdlb[_u64](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector right) => 
AbsoluteDifferenceWideningLower(left, right); + + + /// AbsoluteDifferenceWideningUpper : Absolute difference long (top) + + /// + /// svint16_t svabdlt[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, Vector right) => AbsoluteDifferenceWideningUpper(left, right); + + /// + /// svint32_t svabdlt[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, Vector right) => AbsoluteDifferenceWideningUpper(left, right); + + /// + /// svint64_t svabdlt[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, Vector right) => AbsoluteDifferenceWideningUpper(left, right); + + /// + /// svuint16_t svabdlt[_u16](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, Vector right) => AbsoluteDifferenceWideningUpper(left, right); + + /// + /// svuint32_t svabdlt[_u32](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, Vector right) => AbsoluteDifferenceWideningUpper(left, right); + + /// + /// svuint64_t svabdlt[_u64](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, Vector right) => AbsoluteDifferenceWideningUpper(left, right); + + + /// AddCarryWideningLower : Add with carry long (bottom) + + /// + /// svuint32_t svadclb[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector AddCarryWideningLower(Vector op1, Vector op2, Vector op3) => AddCarryWideningLower(op1, op2, op3); + + /// + /// svuint64_t svadclb[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector AddCarryWideningLower(Vector op1, Vector op2, Vector op3) => AddCarryWideningLower(op1, op2, op3); + + + /// AddCarryWideningUpper : Add with carry long (top) + + /// + /// svuint32_t svadclt[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector AddCarryWideningUpper(Vector op1, Vector op2, Vector op3) => AddCarryWideningUpper(op1, op2, op3); + + /// + /// svuint64_t svadclt[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector AddCarryWideningUpper(Vector op1, Vector op2, Vector op3) => AddCarryWideningUpper(op1, op2, op3); + + + /// AddHighNarrowingLower : Add narrow high part (bottom) + + /// + /// svint8_t svaddhnb[_s16](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector AddHighNarrowingLower(Vector left, Vector right) => AddHighNarrowingLower(left, right); + + /// + /// svint16_t svaddhnb[_s32](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector AddHighNarrowingLower(Vector left, Vector right) => AddHighNarrowingLower(left, right); + + /// + /// svint32_t svaddhnb[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector AddHighNarrowingLower(Vector left, Vector right) => AddHighNarrowingLower(left, right); + + /// + /// svuint8_t svaddhnb[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector AddHighNarrowingLower(Vector left, Vector right) => AddHighNarrowingLower(left, right); + + /// + /// svuint16_t svaddhnb[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector AddHighNarrowingLower(Vector left, Vector right) => AddHighNarrowingLower(left, right); + + /// + /// svuint32_t svaddhnb[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector AddHighNarrowingLower(Vector left, Vector right) => AddHighNarrowingLower(left, right); + + + /// AddHighNarrowingUpper : Add narrow high part (top) + + /// + /// svint8_t svaddhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, Vector right) => AddHighNarrowingUpper(even, left, right); + + /// + /// svint16_t svaddhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, Vector right) => AddHighNarrowingUpper(even, left, right); + + /// + /// svint32_t svaddhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, Vector right) => AddHighNarrowingUpper(even, left, right); + + /// + /// svuint8_t svaddhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, Vector right) => AddHighNarrowingUpper(even, left, right); + + /// + /// svuint16_t svaddhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, Vector right) => AddHighNarrowingUpper(even, left, right); + + /// + /// svuint32_t svaddhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, Vector right) => AddHighNarrowingUpper(even, left, right); + + + /// AddPairwise : Add pairwise + + /// + /// svint8_t svaddp[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svaddp[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svint16_t svaddp[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svaddp[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svint32_t svaddp[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svaddp[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svint64_t svaddp[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svaddp[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svuint8_t svaddp[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svaddp[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svuint16_t svaddp[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svaddp[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svuint32_t svaddp[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svaddp[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svuint64_t svaddp[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svaddp[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static
unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svfloat32_t svaddp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svaddp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svfloat64_t svaddp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svaddp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + + /// AddPairwiseWidening : Add and accumulate long pairwise + + /// + /// svint16_t svadalp[_s16]_m(svbool_t pg, svint16_t op1, svint8_t op2) + /// svint16_t svadalp[_s16]_x(svbool_t pg, svint16_t op1, svint8_t op2) + /// svint16_t svadalp[_s16]_z(svbool_t pg, svint16_t op1, svint8_t op2) + /// + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right) => AddPairwiseWidening(left, right); + + /// + /// svint32_t svadalp[_s32]_m(svbool_t pg, svint32_t op1, svint16_t op2) + /// svint32_t svadalp[_s32]_x(svbool_t pg, svint32_t op1, svint16_t op2) + /// svint32_t svadalp[_s32]_z(svbool_t pg, svint32_t op1, svint16_t op2) + /// + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right) => AddPairwiseWidening(left, right); + + /// + /// svint64_t svadalp[_s64]_m(svbool_t pg, svint64_t op1, svint32_t op2) + /// svint64_t svadalp[_s64]_x(svbool_t pg, svint64_t op1, svint32_t op2) + /// svint64_t svadalp[_s64]_z(svbool_t pg, svint64_t op1, svint32_t op2) + /// + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right) => AddPairwiseWidening(left, right); + + /// + /// svuint16_t svadalp[_u16]_m(svbool_t pg, svuint16_t op1, svuint8_t op2) + /// svuint16_t svadalp[_u16]_x(svbool_t pg, svuint16_t op1, svuint8_t op2) + /// svuint16_t svadalp[_u16]_z(svbool_t pg, svuint16_t op1, svuint8_t op2) + /// + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right) => AddPairwiseWidening(left, right); + + /// + /// svuint32_t svadalp[_u32]_m(svbool_t pg, svuint32_t op1, svuint16_t op2) + /// svuint32_t svadalp[_u32]_x(svbool_t pg, svuint32_t op1, svuint16_t op2) + /// svuint32_t svadalp[_u32]_z(svbool_t pg, svuint32_t op1, svuint16_t op2) + /// + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right) => AddPairwiseWidening(left, right); + + /// + /// svuint64_t svadalp[_u64]_m(svbool_t pg, svuint64_t op1, svuint32_t op2) + /// svuint64_t svadalp[_u64]_x(svbool_t pg, svuint64_t op1, svuint32_t op2) + /// svuint64_t svadalp[_u64]_z(svbool_t pg, svuint64_t op1, svuint32_t op2) + /// + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right) => AddPairwiseWidening(left, right); + + + /// AddRotateComplex : Complex add with rotate + + /// + /// svint8_t svcadd[_s8](svint8_t op1, svint8_t op2, uint64_t imm_rotation) + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + /// + /// svint16_t svcadd[_s16](svint16_t op1, svint16_t op2, uint64_t imm_rotation) + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + /// + /// svint32_t svcadd[_s32](svint32_t op1, svint32_t op2, uint64_t imm_rotation) + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, 
[ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + /// + /// svint64_t svcadd[_s64](svint64_t op1, svint64_t op2, uint64_t imm_rotation) + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + /// + /// svuint8_t svcadd[_u8](svuint8_t op1, svuint8_t op2, uint64_t imm_rotation) + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + /// + /// svuint16_t svcadd[_u16](svuint16_t op1, svuint16_t op2, uint64_t imm_rotation) + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + /// + /// svuint32_t svcadd[_u32](svuint32_t op1, svuint32_t op2, uint64_t imm_rotation) + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + /// + /// svuint64_t svcadd[_u64](svuint64_t op1, svuint64_t op2, uint64_t imm_rotation) + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + + /// AddSaturate : Saturating add + + /// + /// svint8_t svqadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svint16_t svqadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svint32_t svqadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svint64_t svqadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svuint8_t svqadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svqadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svqadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svuint16_t svqadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svqadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svqadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svuint32_t svqadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svqadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t 
svqadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svuint64_t svqadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svqadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svqadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + + /// AddSaturateWithSignedAddend : Saturating add with signed addend + + /// + /// svuint8_t svsqadd[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svsqadd[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svsqadd[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) + /// + public static unsafe Vector AddSaturateWithSignedAddend(Vector left, Vector right) => AddSaturateWithSignedAddend(left, right); + + /// + /// svuint16_t svsqadd[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svsqadd[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svsqadd[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) + /// + public static unsafe Vector AddSaturateWithSignedAddend(Vector left, Vector right) => AddSaturateWithSignedAddend(left, right); + + /// + /// svuint32_t svsqadd[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svsqadd[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svsqadd[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) + /// + public static unsafe Vector AddSaturateWithSignedAddend(Vector left, Vector right) => AddSaturateWithSignedAddend(left, right); + + /// + /// svuint64_t svsqadd[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svsqadd[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svsqadd[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) + /// + public static unsafe Vector AddSaturateWithSignedAddend(Vector left, Vector right) => AddSaturateWithSignedAddend(left, right); + + + /// AddSaturateWithUnsignedAddend : Saturating add with unsigned addend + + /// + /// svint8_t svuqadd[_s8]_m(svbool_t pg, svint8_t op1, svuint8_t op2) + /// svint8_t svuqadd[_s8]_x(svbool_t pg, svint8_t op1, svuint8_t op2) + /// svint8_t svuqadd[_s8]_z(svbool_t pg, svint8_t op1, svuint8_t op2) + /// + public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, Vector right) => AddSaturateWithUnsignedAddend(left, right); + + /// + /// svint16_t svuqadd[_s16]_m(svbool_t pg, svint16_t op1, svuint16_t op2) + /// svint16_t svuqadd[_s16]_x(svbool_t pg, svint16_t op1, svuint16_t op2) + /// svint16_t svuqadd[_s16]_z(svbool_t pg, svint16_t op1, svuint16_t op2) + /// + public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, Vector right) => AddSaturateWithUnsignedAddend(left, right); + + /// + /// svint32_t svuqadd[_s32]_m(svbool_t pg, svint32_t op1, svuint32_t op2) + /// svint32_t svuqadd[_s32]_x(svbool_t pg, svint32_t op1, svuint32_t op2) + /// svint32_t svuqadd[_s32]_z(svbool_t pg, svint32_t op1, svuint32_t op2) + /// + public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, Vector right) => AddSaturateWithUnsignedAddend(left, right); + + /// + /// svint64_t svuqadd[_s64]_m(svbool_t pg, svint64_t op1, svuint64_t op2) + /// svint64_t svuqadd[_s64]_x(svbool_t pg, svint64_t op1, svuint64_t op2) + /// svint64_t svuqadd[_s64]_z(svbool_t pg, svint64_t op1, svuint64_t op2) + /// + public static unsafe Vector 
AddSaturateWithUnsignedAddend(Vector left, Vector right) => AddSaturateWithUnsignedAddend(left, right); + + + /// AddWideLower : Add wide (bottom) + + /// + /// svint16_t svaddwb[_s16](svint16_t op1, svint8_t op2) + /// + public static unsafe Vector AddWideLower(Vector left, Vector right) => AddWideLower(left, right); + + /// + /// svint32_t svaddwb[_s32](svint32_t op1, svint16_t op2) + /// + public static unsafe Vector AddWideLower(Vector left, Vector right) => AddWideLower(left, right); + + /// + /// svint64_t svaddwb[_s64](svint64_t op1, svint32_t op2) + /// + public static unsafe Vector AddWideLower(Vector left, Vector right) => AddWideLower(left, right); + + /// + /// svuint16_t svaddwb[_u16](svuint16_t op1, svuint8_t op2) + /// + public static unsafe Vector AddWideLower(Vector left, Vector right) => AddWideLower(left, right); + + /// + /// svuint32_t svaddwb[_u32](svuint32_t op1, svuint16_t op2) + /// + public static unsafe Vector AddWideLower(Vector left, Vector right) => AddWideLower(left, right); + + /// + /// svuint64_t svaddwb[_u64](svuint64_t op1, svuint32_t op2) + /// + public static unsafe Vector AddWideLower(Vector left, Vector right) => AddWideLower(left, right); + + + /// AddWideUpper : Add wide (top) + + /// + /// svint16_t svaddwt[_s16](svint16_t op1, svint8_t op2) + /// + public static unsafe Vector AddWideUpper(Vector left, Vector right) => AddWideUpper(left, right); + + /// + /// svint32_t svaddwt[_s32](svint32_t op1, svint16_t op2) + /// + public static unsafe Vector AddWideUpper(Vector left, Vector right) => AddWideUpper(left, right); + + /// + /// svint64_t svaddwt[_s64](svint64_t op1, svint32_t op2) + /// + public static unsafe Vector AddWideUpper(Vector left, Vector right) => AddWideUpper(left, right); + + /// + /// svuint16_t svaddwt[_u16](svuint16_t op1, svuint8_t op2) + /// + public static unsafe Vector AddWideUpper(Vector left, Vector right) => AddWideUpper(left, right); + + /// + /// svuint32_t svaddwt[_u32](svuint32_t op1, svuint16_t op2) + /// + public static unsafe Vector AddWideUpper(Vector left, Vector right) => AddWideUpper(left, right); + + /// + /// svuint64_t svaddwt[_u64](svuint64_t op1, svuint32_t op2) + /// + public static unsafe Vector AddWideUpper(Vector left, Vector right) => AddWideUpper(left, right); + + + /// AddWideningLower : Add long (bottom) + + /// + /// svint16_t svaddlb[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector AddWideningLower(Vector left, Vector right) => AddWideningLower(left, right); + + /// + /// svint32_t svaddlb[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector AddWideningLower(Vector left, Vector right) => AddWideningLower(left, right); + + /// + /// svint64_t svaddlb[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector AddWideningLower(Vector left, Vector right) => AddWideningLower(left, right); + + /// + /// svuint16_t svaddlb[_u16](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector AddWideningLower(Vector left, Vector right) => AddWideningLower(left, right); + + /// + /// svuint32_t svaddlb[_u32](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector AddWideningLower(Vector left, Vector right) => AddWideningLower(left, right); + + /// + /// svuint64_t svaddlb[_u64](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector AddWideningLower(Vector left, Vector right) => AddWideningLower(left, right); + + + /// AddWideningLowerUpper : Add long (bottom + top) + + /// + /// svint16_t svaddlbt[_s16](svint8_t op1, svint8_t 
op2) + /// + public static unsafe Vector AddWideningLowerUpper(Vector left, Vector right) => AddWideningLowerUpper(left, right); + + /// + /// svint32_t svaddlbt[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector AddWideningLowerUpper(Vector left, Vector right) => AddWideningLowerUpper(left, right); + + /// + /// svint64_t svaddlbt[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector AddWideningLowerUpper(Vector left, Vector right) => AddWideningLowerUpper(left, right); + + + /// AddWideningUpper : Add long (top) + + /// + /// svint16_t svaddlt[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector AddWideningUpper(Vector left, Vector right) => AddWideningUpper(left, right); + + /// + /// svint32_t svaddlt[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector AddWideningUpper(Vector left, Vector right) => AddWideningUpper(left, right); + + /// + /// svint64_t svaddlt[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector AddWideningUpper(Vector left, Vector right) => AddWideningUpper(left, right); + + /// + /// svuint16_t svaddlt[_u16](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector AddWideningUpper(Vector left, Vector right) => AddWideningUpper(left, right); + + /// + /// svuint32_t svaddlt[_u32](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector AddWideningUpper(Vector left, Vector right) => AddWideningUpper(left, right); + + /// + /// svuint64_t svaddlt[_u64](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector AddWideningUpper(Vector left, Vector right) => AddWideningUpper(left, right); + + + /// BitwiseClearXor : Bitwise clear and exclusive OR + + /// + /// svint8_t svbcax[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + /// + /// svint16_t svbcax[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + /// + /// svint32_t svbcax[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + /// + /// svint64_t svbcax[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + /// + /// svuint8_t svbcax[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + /// + /// svuint16_t svbcax[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + /// + /// svuint32_t svbcax[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + /// + /// svuint64_t svbcax[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + + /// BitwiseSelect : Bitwise select + + /// + /// svint8_t svbsl[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe 
Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + /// + /// svint16_t svbsl[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + /// + /// svint32_t svbsl[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + /// + /// svint64_t svbsl[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + /// + /// svuint8_t svbsl[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + /// + /// svuint16_t svbsl[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + /// + /// svuint32_t svbsl[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + /// + /// svuint64_t svbsl[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + + + /// BitwiseSelectLeftInverted : Bitwise select with first input inverted + + /// + /// svint8_t svbsl1n[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + /// + /// svint16_t svbsl1n[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + /// + /// svint32_t svbsl1n[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + /// + /// svint64_t svbsl1n[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + /// + /// svuint8_t svbsl1n[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + /// + /// svuint16_t svbsl1n[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + /// + /// svuint32_t svbsl1n[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + /// + /// svuint64_t svbsl1n[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => 
BitwiseSelectLeftInverted(select, left, right); + + + /// BitwiseSelectRightInverted : Bitwise select with second input inverted + + /// + /// svint8_t svbsl2n[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + /// + /// svint16_t svbsl2n[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + /// + /// svint32_t svbsl2n[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + /// + /// svint64_t svbsl2n[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + /// + /// svuint8_t svbsl2n[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + /// + /// svuint16_t svbsl2n[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + /// + /// svuint32_t svbsl2n[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + /// + /// svuint64_t svbsl2n[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + + /// CountMatchingElements : Count matching elements + + /// + /// svuint32_t svhistcnt[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector CountMatchingElements(Vector mask, Vector left, Vector right) => CountMatchingElements(mask, left, right); + + /// + /// svuint32_t svhistcnt[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector CountMatchingElements(Vector mask, Vector left, Vector right) => CountMatchingElements(mask, left, right); + + /// + /// svuint64_t svhistcnt[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector CountMatchingElements(Vector mask, Vector left, Vector right) => CountMatchingElements(mask, left, right); + + /// + /// svuint64_t svhistcnt[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector CountMatchingElements(Vector mask, Vector left, Vector right) => CountMatchingElements(mask, left, right); + + + /// CountMatchingElementsIn128BitSegments : Count matching elements in 128-bit segments + + /// + /// svuint8_t svhistseg[_s8](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector CountMatchingElementsIn128BitSegments(Vector left, Vector right) => CountMatchingElementsIn128BitSegments(left, right); + + /// + /// svuint8_t svhistseg[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector CountMatchingElementsIn128BitSegments(Vector left, Vector right) => 
CountMatchingElementsIn128BitSegments(left, right); + + + /// CreateWhileGreaterThanMask : While decrementing scalar is greater than + + /// + /// svbool_t svwhilegt_b8[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b8[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b8[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b8[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b16[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b16[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b16[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b16[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b32[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b32[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b32[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b32[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b64[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b64[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b64[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b64[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right) => CreateWhileGreaterThanMask(left, right); + + + /// CreateWhileGreaterThanOrEqualMask : While decrementing scalar is greater than or equal to + + /// + /// svbool_t svwhilege_b8[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector 
CreateWhileGreaterThanOrEqualMask(int left, int right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b8[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b8[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b8[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b16[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b16[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b16[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b16[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b32[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b32[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b32[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b32[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b64[_s32](int32_t op1, int32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b64[_s64](int64_t op1, int64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b64[_u32](uint32_t op1, uint32_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b64[_u64](uint64_t op1, uint64_t op2) + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) => CreateWhileGreaterThanOrEqualMask(left, right); + + + /// CreateWhileReadAfterWriteMask : While free of read-after-write conflicts + + /// + /// svbool_t svwhilerw[_s8](const int8_t *op1, const int8_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(sbyte* left, 
sbyte* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_s16](const int16_t *op1, const int16_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(short* left, short* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_s32](const int32_t *op1, const int32_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(int* left, int* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_s64](const int64_t *op1, const int64_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(long* left, long* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_u8](const uint8_t *op1, const uint8_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(byte* left, byte* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_u16](const uint16_t *op1, const uint16_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(ushort* left, ushort* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_u32](const uint32_t *op1, const uint32_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(uint* left, uint* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_u64](const uint64_t *op1, const uint64_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(ulong* left, ulong* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_f32](const float32_t *op1, const float32_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(float* left, float* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_f64](const float64_t *op1, const float64_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(double* left, double* right) => CreateWhileReadAfterWriteMask(left, right); + + + /// CreateWhileWriteAfterReadMask : While free of write-after-read conflicts + + /// + /// svbool_t svwhilewr[_s8](const int8_t *op1, const int8_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(sbyte* left, sbyte* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_s16](const int16_t *op1, const int16_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(short* left, short* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_s32](const int32_t *op1, const int32_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(int* left, int* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_s64](const int64_t *op1, const int64_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(long* left, long* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_u8](const uint8_t *op1, const uint8_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(byte* left, byte* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_u16](const uint16_t *op1, const uint16_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(ushort* left, ushort* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_u32](const uint32_t *op1, const uint32_t *op2) + /// + public static unsafe Vector 
CreateWhileWriteAfterReadMask(uint* left, uint* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_u64](const uint64_t *op1, const uint64_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(ulong* left, ulong* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_f32](const float32_t *op1, const float32_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(float* left, float* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_f64](const float64_t *op1, const float64_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(double* left, double* right) => CreateWhileWriteAfterReadMask(left, right); + + + /// DotProductComplex : Complex dot product + + /// + /// svint32_t svcdot[_s32](svint32_t op1, svint8_t op2, svint8_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation) => DotProductComplex(op1, op2, op3, rotation); + + /// + /// svint32_t svcdot_lane[_s32](svint32_t op1, svint8_t op2, svint8_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation) => DotProductComplex(op1, op2, op3, imm_index, rotation); + + /// + /// svint64_t svcdot[_s64](svint64_t op1, svint16_t op2, svint16_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation) => DotProductComplex(op1, op2, op3, rotation); + + /// + /// svint64_t svcdot_lane[_s64](svint64_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation) => DotProductComplex(op1, op2, op3, imm_index, rotation); + + + /// DownConvertNarrowingUpper : Down convert and narrow (top) + + /// + /// svfloat32_t svcvtnt_f32[_f64]_m(svfloat32_t even, svbool_t pg, svfloat64_t op) + /// svfloat32_t svcvtnt_f32[_f64]_x(svfloat32_t even, svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector DownConvertNarrowingUpper(Vector value) => DownConvertNarrowingUpper(value); + + + /// DownConvertRoundingOdd : Down convert, rounding to odd + + /// + /// svfloat32_t svcvtx_f32[_f64]_m(svfloat32_t inactive, svbool_t pg, svfloat64_t op) + /// svfloat32_t svcvtx_f32[_f64]_x(svbool_t pg, svfloat64_t op) + /// svfloat32_t svcvtx_f32[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector DownConvertRoundingOdd(Vector value) => DownConvertRoundingOdd(value); + + + /// DownConvertRoundingOddUpper : Down convert, rounding to odd (top) + + /// + /// svfloat32_t svcvtxnt_f32[_f64]_m(svfloat32_t even, svbool_t pg, svfloat64_t op) + /// svfloat32_t svcvtxnt_f32[_f64]_x(svfloat32_t even, svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector DownConvertRoundingOddUpper(Vector value) => DownConvertRoundingOddUpper(value); + + + /// GatherVectorByteZeroExtendNonTemporal : Load 8-bit data and zero-extend, non-temporal + + /// + /// svint32_t svldnt1ub_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorByteZeroExtendNonTemporal(mask, addresses); + + /// + /// svint32_t 
+
+
+        /// DownConvertNarrowingUpper : Down convert and narrow (top)
+
+        /// svfloat32_t svcvtnt_f32[_f64]_m(svfloat32_t even, svbool_t pg, svfloat64_t op)
+        /// svfloat32_t svcvtnt_f32[_f64]_x(svfloat32_t even, svbool_t pg, svfloat64_t op)
+        public static unsafe Vector<float> DownConvertNarrowingUpper(Vector<double> value) => DownConvertNarrowingUpper(value);
+
+
+        /// DownConvertRoundingOdd : Down convert, rounding to odd
+
+        /// svfloat32_t svcvtx_f32[_f64]_m(svfloat32_t inactive, svbool_t pg, svfloat64_t op)
+        /// svfloat32_t svcvtx_f32[_f64]_x(svbool_t pg, svfloat64_t op)
+        /// svfloat32_t svcvtx_f32[_f64]_z(svbool_t pg, svfloat64_t op)
+        public static unsafe Vector<float> DownConvertRoundingOdd(Vector<double> value) => DownConvertRoundingOdd(value);
+
+
+        /// DownConvertRoundingOddUpper : Down convert, rounding to odd (top)
+
+        /// svfloat32_t svcvtxnt_f32[_f64]_m(svfloat32_t even, svbool_t pg, svfloat64_t op)
+        /// svfloat32_t svcvtxnt_f32[_f64]_x(svfloat32_t even, svbool_t pg, svfloat64_t op)
+        public static unsafe Vector<float> DownConvertRoundingOddUpper(Vector<double> value) => DownConvertRoundingOddUpper(value);
+
+
+        /// GatherVectorByteZeroExtendNonTemporal : Load 8-bit data and zero-extend, non-temporal
+
+        /// svint32_t svldnt1ub_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+        public static unsafe Vector<int> GatherVectorByteZeroExtendNonTemporal(Vector<int> mask, Vector<uint> addresses) => GatherVectorByteZeroExtendNonTemporal(mask, addresses);
+
+        /// svint32_t svldnt1ub_gather_[u32]offset_s32(svbool_t pg, const uint8_t *base, svuint32_t offsets)
+        public static unsafe Vector<int> GatherVectorByteZeroExtendNonTemporal(Vector<int> mask, byte* address, Vector<uint> offsets) => GatherVectorByteZeroExtendNonTemporal(mask, address, offsets);
+
+        /// svint64_t svldnt1ub_gather_[s64]offset_s64(svbool_t pg, const uint8_t *base, svint64_t offsets)
+        public static unsafe Vector<long> GatherVectorByteZeroExtendNonTemporal(Vector<long> mask, byte* address, Vector<long> offsets) => GatherVectorByteZeroExtendNonTemporal(mask, address, offsets);
+
+        /// svint64_t svldnt1ub_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+        public static unsafe Vector<long> GatherVectorByteZeroExtendNonTemporal(Vector<long> mask, Vector<ulong> addresses) => GatherVectorByteZeroExtendNonTemporal(mask, addresses);
+
+        /// svint64_t svldnt1ub_gather_[u64]offset_s64(svbool_t pg, const uint8_t *base, svuint64_t offsets)
+        public static unsafe Vector<long> GatherVectorByteZeroExtendNonTemporal(Vector<long> mask, byte* address, Vector<ulong> offsets) => GatherVectorByteZeroExtendNonTemporal(mask, address, offsets);
+
+        /// svuint32_t svldnt1ub_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+        public static unsafe Vector<uint> GatherVectorByteZeroExtendNonTemporal(Vector<uint> mask, Vector<uint> addresses) => GatherVectorByteZeroExtendNonTemporal(mask, addresses);
+
+        /// svuint32_t svldnt1ub_gather_[u32]offset_u32(svbool_t pg, const uint8_t *base, svuint32_t offsets)
+        public static unsafe Vector<uint> GatherVectorByteZeroExtendNonTemporal(Vector<uint> mask, byte* address, Vector<uint> offsets) => GatherVectorByteZeroExtendNonTemporal(mask, address, offsets);
+
+        /// svuint64_t svldnt1ub_gather_[s64]offset_u64(svbool_t pg, const uint8_t *base, svint64_t offsets)
+        public static unsafe Vector<ulong> GatherVectorByteZeroExtendNonTemporal(Vector<ulong> mask, byte* address, Vector<long> offsets) => GatherVectorByteZeroExtendNonTemporal(mask, address, offsets);
+
+        /// svuint64_t svldnt1ub_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+        public static unsafe Vector<ulong> GatherVectorByteZeroExtendNonTemporal(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorByteZeroExtendNonTemporal(mask, addresses);
+
+        /// svuint64_t svldnt1ub_gather_[u64]offset_u64(svbool_t pg, const uint8_t *base, svuint64_t offsets)
+        public static unsafe Vector<ulong> GatherVectorByteZeroExtendNonTemporal(Vector<ulong> mask, byte* address, Vector<ulong> offsets) => GatherVectorByteZeroExtendNonTemporal(mask, address, offsets);
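+
+        // Usage sketch (illustrative, hypothetical helper names): a non-temporal gather of
+        // sparse byte flags, zero-extended into 32-bit lanes. The non-temporal hint tells the
+        // core not to retain the gathered cache lines, which suits a single streaming pass
+        // over a table too large to cache. Sve.CreateTrueMaskUInt32 is assumed from the base
+        // SVE proposal.
+        //
+        //   static unsafe Vector<uint> LoadFlags(byte* table, Vector<uint> offsets)
+        //   {
+        //       Vector<uint> mask = Sve.CreateTrueMaskUInt32();
+        //       return Sve2.GatherVectorByteZeroExtendNonTemporal(mask, table, offsets);
+        //   }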
+
+
+        /// GatherVectorInt16SignExtendNonTemporal : Load 16-bit data and sign-extend, non-temporal
+
+        /// svint32_t svldnt1sh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+        public static unsafe Vector<int> GatherVectorInt16SignExtendNonTemporal(Vector<int> mask, Vector<uint> addresses) => GatherVectorInt16SignExtendNonTemporal(mask, addresses);
+
+        /// svint64_t svldnt1sh_gather_[s64]index_s64(svbool_t pg, const int16_t *base, svint64_t indices)
+        public static unsafe Vector<long> GatherVectorInt16SignExtendNonTemporal(Vector<long> mask, short* address, Vector<long> indices) => GatherVectorInt16SignExtendNonTemporal(mask, address, indices);
+
+        /// svint64_t svldnt1sh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+        public static unsafe Vector<long> GatherVectorInt16SignExtendNonTemporal(Vector<long> mask, Vector<ulong> addresses) => GatherVectorInt16SignExtendNonTemporal(mask, addresses);
+
+        /// svint64_t svldnt1sh_gather_[u64]index_s64(svbool_t pg, const int16_t *base, svuint64_t indices)
+        public static unsafe Vector<long> GatherVectorInt16SignExtendNonTemporal(Vector<long> mask, short* address, Vector<ulong> indices) => GatherVectorInt16SignExtendNonTemporal(mask, address, indices);
+
+        /// svuint32_t svldnt1sh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+        public static unsafe Vector<uint> GatherVectorInt16SignExtendNonTemporal(Vector<uint> mask, Vector<uint> addresses) => GatherVectorInt16SignExtendNonTemporal(mask, addresses);
+
+        /// svuint64_t svldnt1sh_gather_[s64]index_u64(svbool_t pg, const int16_t *base, svint64_t indices)
+        public static unsafe Vector<ulong> GatherVectorInt16SignExtendNonTemporal(Vector<ulong> mask, short* address, Vector<long> indices) => GatherVectorInt16SignExtendNonTemporal(mask, address, indices);
+
+        /// svuint64_t svldnt1sh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+        public static unsafe Vector<ulong> GatherVectorInt16SignExtendNonTemporal(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorInt16SignExtendNonTemporal(mask, addresses);
+
+        /// svuint64_t svldnt1sh_gather_[u64]index_u64(svbool_t pg, const int16_t *base, svuint64_t indices)
+        public static unsafe Vector<ulong> GatherVectorInt16SignExtendNonTemporal(Vector<ulong> mask, short* address, Vector<ulong> indices) => GatherVectorInt16SignExtendNonTemporal(mask, address, indices);
+
+
+        /// GatherVectorInt16WithByteOffsetsSignExtendNonTemporal : Load 16-bit data and sign-extend, non-temporal
+
+        /// svint32_t svldnt1sh_gather_[u32]offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+        public static unsafe Vector<int> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<int> mask, short* address, Vector<uint> offsets) => GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
+        /// svint64_t svldnt1sh_gather_[s64]offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets)
+        public static unsafe Vector<long> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<long> mask, short* address, Vector<long> offsets) => GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
+        /// svint64_t svldnt1sh_gather_[u64]offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+        public static unsafe Vector<long> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<long> mask, short* address, Vector<ulong> offsets) => GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
+        /// svuint32_t svldnt1sh_gather_[u32]offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+        public static unsafe Vector<uint> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<uint> mask, short* address, Vector<uint> offsets) => GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
+        /// svuint64_t svldnt1sh_gather_[s64]offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets)
+        public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<ulong> mask, short* address, Vector<long> offsets) => GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
+        /// svuint64_t svldnt1sh_gather_[u64]offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+        public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<ulong> mask, short* address, Vector<ulong> offsets) => GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
+
+        /// GatherVectorInt32SignExtendNonTemporal : Load 32-bit data and sign-extend, non-temporal
+
+        /// svint64_t svldnt1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices)
public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendNonTemporal(mask, address, indices); + + /// + /// svint64_t svldnt1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorInt32SignExtendNonTemporal(mask, addresses); + + /// + /// svint64_t svldnt1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendNonTemporal(mask, address, indices); + + /// + /// svint64_t svldnt1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendNonTemporal(mask, address, indices); + + /// + /// svint64_t svldnt1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorInt32SignExtendNonTemporal(mask, addresses); + + /// + /// svint64_t svldnt1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendNonTemporal(mask, address, indices); + + /// + /// svuint64_t svldnt1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendNonTemporal(mask, address, indices); + + /// + /// svuint64_t svldnt1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorInt32SignExtendNonTemporal(mask, addresses); + + /// + /// svuint64_t svldnt1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendNonTemporal(mask, address, indices); + + /// + /// svuint64_t svldnt1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendNonTemporal(mask, address, indices); + + /// + /// svuint64_t svldnt1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorInt32SignExtendNonTemporal(mask, addresses); + + /// + /// svuint64_t svldnt1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorInt32SignExtendNonTemporal(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendNonTemporal(mask, address, indices); + + + /// GatherVectorInt32WithByteOffsetsSignExtendNonTemporal : Load 32-bit data and sign-extend, non-temporal + + /// + /// svint64_t svldnt1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector 
GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(mask, address, offsets); + + + /// GatherVectorNonTemporal : Unextended load, non-temporal + + /// + /// svint32_t svldnt1_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses) => GatherVectorNonTemporal(mask, addresses); + + /// + /// svint32_t svldnt1_gather_[u32]offset[_s32](svbool_t pg, const int32_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, int* address, Vector offsets) => GatherVectorNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses) => GatherVectorNonTemporal(mask, addresses); + + /// + /// svint64_t svldnt1_gather_[s64]offset[_s64](svbool_t pg, const int64_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, long* address, Vector offsets) => GatherVectorNonTemporal(mask, address, offsets); + + /// + /// svint64_t 
svldnt1_gather_[u64]offset[_s64](svbool_t pg, const int64_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, long* address, Vector offsets) => GatherVectorNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1_gather_[s64]index[_s64](svbool_t pg, const int64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, long* address, Vector indices) => GatherVectorNonTemporal(mask, address, indices); + + /// + /// svint64_t svldnt1_gather_[u64]index[_s64](svbool_t pg, const int64_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, long* address, Vector indices) => GatherVectorNonTemporal(mask, address, indices); + + /// + /// svuint32_t svldnt1_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses) => GatherVectorNonTemporal(mask, addresses); + + /// + /// svuint32_t svldnt1_gather_[u32]offset[_u32](svbool_t pg, const uint32_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses) => GatherVectorNonTemporal(mask, addresses); + + /// + /// svuint64_t svldnt1_gather_[s64]offset[_u64](svbool_t pg, const uint64_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, ulong* address, Vector offsets) => GatherVectorNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1_gather_[u64]offset[_u64](svbool_t pg, const uint64_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, ulong* address, Vector offsets) => GatherVectorNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1_gather_[s64]index[_u64](svbool_t pg, const uint64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, ulong* address, Vector indices) => GatherVectorNonTemporal(mask, address, indices); + + /// + /// svuint64_t svldnt1_gather_[u64]index[_u64](svbool_t pg, const uint64_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, ulong* address, Vector indices) => GatherVectorNonTemporal(mask, address, indices); + + /// + /// svfloat32_t svldnt1_gather[_u32base]_f32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses) => GatherVectorNonTemporal(mask, addresses); + + /// + /// svfloat32_t svldnt1_gather_[u32]offset[_f32](svbool_t pg, const float32_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, float* address, Vector offsets) => GatherVectorNonTemporal(mask, address, offsets); + + /// + /// svfloat64_t svldnt1_gather_[s64]offset[_f64](svbool_t pg, const float64_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, double* address, Vector offsets) => GatherVectorNonTemporal(mask, address, offsets); + + /// + /// svfloat64_t svldnt1_gather_[s64]index[_f64](svbool_t pg, const float64_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, double* address, Vector indices) => 
GatherVectorNonTemporal(mask, address, indices); + + /// + /// svfloat64_t svldnt1_gather[_u64base]_f64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, Vector addresses) => GatherVectorNonTemporal(mask, addresses); + + /// + /// svfloat64_t svldnt1_gather_[u64]offset[_f64](svbool_t pg, const float64_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, double* address, Vector offsets) => GatherVectorNonTemporal(mask, address, offsets); + + /// + /// svfloat64_t svldnt1_gather_[u64]index[_f64](svbool_t pg, const float64_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorNonTemporal(Vector mask, double* address, Vector indices) => GatherVectorNonTemporal(mask, address, indices); + + + /// GatherVectorSByteSignExtendNonTemporal : Load 8-bit data and sign-extend, non-temporal + + /// + /// svint32_t svldnt1sb_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorSByteSignExtendNonTemporal(mask, addresses); + + /// + /// svint32_t svldnt1sb_gather_[u32]offset_s32(svbool_t pg, const int8_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1sb_gather_[s64]offset_s64(svbool_t pg, const int8_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1sb_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorSByteSignExtendNonTemporal(mask, addresses); + + /// + /// svint64_t svldnt1sb_gather_[u64]offset_s64(svbool_t pg, const int8_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendNonTemporal(mask, address, offsets); + + /// + /// svuint32_t svldnt1sb_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorSByteSignExtendNonTemporal(mask, addresses); + + /// + /// svuint32_t svldnt1sb_gather_[u32]offset_u32(svbool_t pg, const int8_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1sb_gather_[s64]offset_u64(svbool_t pg, const int8_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1sb_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorSByteSignExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorSByteSignExtendNonTemporal(mask, addresses); + + /// + /// svuint64_t svldnt1sb_gather_[u64]offset_u64(svbool_t pg, const int8_t *base, svuint64_t offsets) + /// + public static unsafe Vector 
GatherVectorSByteSignExtendNonTemporal(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendNonTemporal(mask, address, offsets); + + + /// GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal : Load 16-bit data and zero-extend, non-temporal + + /// + /// svint32_t svldnt1uh_gather_[u32]offset_s32(svbool_t pg, const uint16_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1uh_gather_[s64]offset_s64(svbool_t pg, const uint16_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1uh_gather_[u64]offset_s64(svbool_t pg, const uint16_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svuint32_t svldnt1uh_gather_[u32]offset_u32(svbool_t pg, const uint16_t *base, svuint32_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1uh_gather_[s64]offset_u64(svbool_t pg, const uint16_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1uh_gather_[u64]offset_u64(svbool_t pg, const uint16_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector mask, ushort* address, Vector offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + + /// GatherVectorUInt16ZeroExtendNonTemporal : Load 16-bit data and zero-extend, non-temporal + + /// + /// svint32_t svldnt1uh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorUInt16ZeroExtendNonTemporal(mask, addresses); + + /// + /// svint64_t svldnt1uh_gather_[s64]index_s64(svbool_t pg, const uint16_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svint64_t svldnt1uh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorUInt16ZeroExtendNonTemporal(mask, addresses); + + /// + /// svint64_t svldnt1uh_gather_[u64]index_s64(svbool_t pg, const uint16_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svuint32_t svldnt1uh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + 
/// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorUInt16ZeroExtendNonTemporal(mask, addresses); + + /// + /// svuint64_t svldnt1uh_gather_[s64]index_u64(svbool_t pg, const uint16_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svuint64_t svldnt1uh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorUInt16ZeroExtendNonTemporal(mask, addresses); + + /// + /// svuint64_t svldnt1uh_gather_[u64]index_u64(svbool_t pg, const uint16_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtendNonTemporal(mask, address, indices); + + + /// GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal : Load 32-bit data and zero-extend, non-temporal + + /// + /// svint64_t svldnt1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t 
offsets) + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + + /// GatherVectorUInt32ZeroExtendNonTemporal : Load 32-bit data and zero-extend, non-temporal + + /// + /// svint64_t svldnt1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svint64_t svldnt1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtendNonTemporal(mask, addresses); + + /// + /// svint64_t svldnt1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svint64_t svldnt1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svint64_t svldnt1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtendNonTemporal(mask, addresses); + + /// + /// svint64_t svldnt1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svuint64_t svldnt1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svuint64_t svldnt1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtendNonTemporal(mask, addresses); + + /// + /// svuint64_t svldnt1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svuint64_t svldnt1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svuint64_t svldnt1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtendNonTemporal(mask, addresses); + + /// + /// svuint64_t svldnt1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// 
+ public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + + /// HalvingAdd : Halving add + + /// + /// svint8_t svhadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svhadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svhadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) => HalvingAdd(left, right); + + /// + /// svint16_t svhadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svhadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svhadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) => HalvingAdd(left, right); + + /// + /// svint32_t svhadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svhadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svhadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) => HalvingAdd(left, right); + + /// + /// svint64_t svhadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svhadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svhadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) => HalvingAdd(left, right); + + /// + /// svuint8_t svhadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svhadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svhadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) => HalvingAdd(left, right); + + /// + /// svuint16_t svhadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svhadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svhadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) => HalvingAdd(left, right); + + /// + /// svuint32_t svhadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svhadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svhadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) => HalvingAdd(left, right); + + /// + /// svuint64_t svhadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svhadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svhadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) => HalvingAdd(left, right); + + + /// HalvingSubtract : Halving subtract + + /// + /// svint8_t svhsub[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svhsub[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svhsub[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + /// + /// svint16_t svhsub[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svhsub[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svhsub[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, 
Vector right) => HalvingSubtract(left, right); + + /// + /// svint32_t svhsub[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svhsub[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svhsub[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + /// + /// svint64_t svhsub[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svhsub[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svhsub[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + /// + /// svuint8_t svhsub[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svhsub[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svhsub[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + /// + /// svuint16_t svhsub[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svhsub[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svhsub[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + /// + /// svuint32_t svhsub[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svhsub[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svhsub[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + /// + /// svuint64_t svhsub[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svhsub[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svhsub[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + + /// HalvingSubtractReversed : Halving subtract reversed + + /// + /// svint8_t svhsubr[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svhsubr[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svhsubr[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + /// + /// svint16_t svhsubr[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svhsubr[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svhsubr[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + /// + /// svint32_t svhsubr[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svhsubr[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svhsubr[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + /// + /// svint64_t svhsubr[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svhsubr[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svhsubr[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector 
HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + /// + /// svuint8_t svhsubr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svhsubr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svhsubr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + /// + /// svuint16_t svhsubr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svhsubr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svhsubr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + /// + /// svuint32_t svhsubr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svhsubr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svhsubr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + /// + /// svuint64_t svhsubr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svhsubr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svhsubr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + + /// InterleavingXorLowerUpper : Interleaving exclusive OR (bottom, top) + + /// + /// svint8_t sveorbt[_s8](svint8_t odd, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + /// + /// svint16_t sveorbt[_s16](svint16_t odd, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + /// + /// svint32_t sveorbt[_s32](svint32_t odd, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + /// + /// svint64_t sveorbt[_s64](svint64_t odd, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + /// + /// svuint8_t sveorbt[_u8](svuint8_t odd, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + /// + /// svuint16_t sveorbt[_u16](svuint16_t odd, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + /// + /// svuint32_t sveorbt[_u32](svuint32_t odd, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + /// + /// svuint64_t sveorbt[_u64](svuint64_t odd, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + + /// InterleavingXorUpperLower : Interleaving 
exclusive OR (top, bottom)
+
+        /// svint8_t sveortb[_s8](svint8_t even, svint8_t op1, svint8_t op2)
+        public static unsafe Vector<sbyte> InterleavingXorUpperLower(Vector<sbyte> even, Vector<sbyte> left, Vector<sbyte> right) => InterleavingXorUpperLower(even, left, right);
+
+        /// svint16_t sveortb[_s16](svint16_t even, svint16_t op1, svint16_t op2)
+        public static unsafe Vector<short> InterleavingXorUpperLower(Vector<short> even, Vector<short> left, Vector<short> right) => InterleavingXorUpperLower(even, left, right);
+
+        /// svint32_t sveortb[_s32](svint32_t even, svint32_t op1, svint32_t op2)
+        public static unsafe Vector<int> InterleavingXorUpperLower(Vector<int> even, Vector<int> left, Vector<int> right) => InterleavingXorUpperLower(even, left, right);
+
+        /// svint64_t sveortb[_s64](svint64_t even, svint64_t op1, svint64_t op2)
+        public static unsafe Vector<long> InterleavingXorUpperLower(Vector<long> even, Vector<long> left, Vector<long> right) => InterleavingXorUpperLower(even, left, right);
+
+        /// svuint8_t sveortb[_u8](svuint8_t even, svuint8_t op1, svuint8_t op2)
+        public static unsafe Vector<byte> InterleavingXorUpperLower(Vector<byte> even, Vector<byte> left, Vector<byte> right) => InterleavingXorUpperLower(even, left, right);
+
+        /// svuint16_t sveortb[_u16](svuint16_t even, svuint16_t op1, svuint16_t op2)
+        public static unsafe Vector<ushort> InterleavingXorUpperLower(Vector<ushort> even, Vector<ushort> left, Vector<ushort> right) => InterleavingXorUpperLower(even, left, right);
+
+        /// svuint32_t sveortb[_u32](svuint32_t even, svuint32_t op1, svuint32_t op2)
+        public static unsafe Vector<uint> InterleavingXorUpperLower(Vector<uint> even, Vector<uint> left, Vector<uint> right) => InterleavingXorUpperLower(even, left, right);
+
+        /// svuint64_t sveortb[_u64](svuint64_t even, svuint64_t op1, svuint64_t op2)
+        public static unsafe Vector<ulong> InterleavingXorUpperLower(Vector<ulong> even, Vector<ulong> left, Vector<ulong> right) => InterleavingXorUpperLower(even, left, right);
+
+
+        /// Log2 : Base 2 logarithm as integer
+
+        /// svint32_t svlogb[_f32]_m(svint32_t inactive, svbool_t pg, svfloat32_t op)
+        /// svint32_t svlogb[_f32]_x(svbool_t pg, svfloat32_t op)
+        /// svint32_t svlogb[_f32]_z(svbool_t pg, svfloat32_t op)
+        public static unsafe Vector<int> Log2(Vector<float> value) => Log2(value);
+
+        /// svint64_t svlogb[_f64]_m(svint64_t inactive, svbool_t pg, svfloat64_t op)
+        /// svint64_t svlogb[_f64]_x(svbool_t pg, svfloat64_t op)
+        /// svint64_t svlogb[_f64]_z(svbool_t pg, svfloat64_t op)
+        public static unsafe Vector<long> Log2(Vector<double> value) => Log2(value);
+
+
+        /// Match : Detect any matching elements
+
+        /// svbool_t svmatch[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+        public static unsafe Vector<sbyte> Match(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right) => Match(mask, left, right);
+
+        /// svbool_t svmatch[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+        public static unsafe Vector<short> Match(Vector<short> mask, Vector<short> left, Vector<short> right) => Match(mask, left, right);
+
+        /// svbool_t svmatch[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+        public static unsafe Vector<byte> Match(Vector<byte> mask, Vector<byte> left, Vector<byte> right) => Match(mask, left, right);
+
+        /// svbool_t svmatch[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+        public static unsafe Vector<ushort> Match(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right) => Match(mask, left, right);
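+
+        // Usage sketch (illustrative, hypothetical helper): MATCH flags every lane of
+        // `haystack` whose value equals any lane of `needles` within the same 128-bit
+        // segment, giving a one-instruction multi-delimiter scan. Sve.CreateTrueMaskByte
+        // and Sve.TestAnyTrue are assumed from the base SVE proposal.
+        //
+        //   static bool ContainsAnyDelimiter(Vector<byte> haystack, Vector<byte> needles)
+        //   {
+        //       Vector<byte> mask = Sve.CreateTrueMaskByte();
+        //       Vector<byte> hits = Sve2.Match(mask, haystack, needles);
+        //       return Sve.TestAnyTrue(mask, hits);
+        //   }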
+
+
+        /// MaxNumberPairwise : Maximum number pairwise
+
+        /// svfloat32_t svmaxnmp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        /// svfloat32_t svmaxnmp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        public static unsafe Vector<float> MaxNumberPairwise(Vector<float> left, Vector<float> right) => MaxNumberPairwise(left, right);
+
+        /// svfloat64_t svmaxnmp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        /// svfloat64_t svmaxnmp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        public static unsafe Vector<double> MaxNumberPairwise(Vector<double> left, Vector<double> right) => MaxNumberPairwise(left, right);
+
+
+        /// MaxPairwise : Maximum pairwise
+
+        /// svint8_t svmaxp[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+        /// svint8_t svmaxp[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+        public static unsafe Vector<sbyte> MaxPairwise(Vector<sbyte> left, Vector<sbyte> right) => MaxPairwise(left, right);
+
+        /// svint16_t svmaxp[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+        /// svint16_t svmaxp[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+        public static unsafe Vector<short> MaxPairwise(Vector<short> left, Vector<short> right) => MaxPairwise(left, right);
+
+        /// svint32_t svmaxp[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+        /// svint32_t svmaxp[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+        public static unsafe Vector<int> MaxPairwise(Vector<int> left, Vector<int> right) => MaxPairwise(left, right);
+
+        /// svint64_t svmaxp[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+        /// svint64_t svmaxp[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+        public static unsafe Vector<long> MaxPairwise(Vector<long> left, Vector<long> right) => MaxPairwise(left, right);
+
+        /// svuint8_t svmaxp[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+        /// svuint8_t svmaxp[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+        public static unsafe Vector<byte> MaxPairwise(Vector<byte> left, Vector<byte> right) => MaxPairwise(left, right);
+
+        /// svuint16_t svmaxp[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+        /// svuint16_t svmaxp[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+        public static unsafe Vector<ushort> MaxPairwise(Vector<ushort> left, Vector<ushort> right) => MaxPairwise(left, right);
+
+        /// svuint32_t svmaxp[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+        /// svuint32_t svmaxp[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+        public static unsafe Vector<uint> MaxPairwise(Vector<uint> left, Vector<uint> right) => MaxPairwise(left, right);
+
+        /// svuint64_t svmaxp[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+        /// svuint64_t svmaxp[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+        public static unsafe Vector<ulong> MaxPairwise(Vector<ulong> left, Vector<ulong> right) => MaxPairwise(left, right);
+
+        /// svfloat32_t svmaxp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        /// svfloat32_t svmaxp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        public static unsafe Vector<float> MaxPairwise(Vector<float> left, Vector<float> right) => MaxPairwise(left, right);
+
+        /// svfloat64_t svmaxp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        /// svfloat64_t svmaxp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        public static unsafe Vector<double> MaxPairwise(Vector<double> left, Vector<double> right) => MaxPairwise(left, right);
+
+
+        /// MinNumberPairwise : Minimum number pairwise
+
+        /// svfloat32_t svminnmp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        /// svfloat32_t svminnmp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        public static unsafe Vector<float> MinNumberPairwise(Vector<float> left, Vector<float> right) => MinNumberPairwise(left, right);
+
+        /// svfloat64_t svminnmp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        /// svfloat64_t svminnmp[_f64]_x(svbool_t pg, 
svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector MinNumberPairwise(Vector left, Vector right) => MinNumberPairwise(left, right); + + + /// MinPairwise : Minimum pairwise + + /// + /// svint8_t svminp[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svminp[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svint16_t svminp[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svminp[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svint32_t svminp[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svminp[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svint64_t svminp[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svminp[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svuint8_t svminp[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svminp[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svuint16_t svminp[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svminp[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svuint32_t svminp[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svminp[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svuint64_t svminp[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svminp[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svfloat32_t svminp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// svfloat32_t svminp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svfloat64_t svminp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// svfloat64_t svminp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + + /// MoveWideningLower : Move long (bottom) + + /// + /// svint16_t svmovlb[_s16](svint8_t op) + /// + public static unsafe Vector MoveWideningLower(Vector value) => MoveWideningLower(value); + + /// + /// svint32_t svmovlb[_s32](svint16_t op) + /// + public static unsafe Vector MoveWideningLower(Vector value) => MoveWideningLower(value); + + /// + /// svint64_t svmovlb[_s64](svint32_t op) + /// + public static unsafe Vector MoveWideningLower(Vector value) => MoveWideningLower(value); + + /// + /// svuint16_t svmovlb[_u16](svuint8_t op) + /// + public static unsafe Vector MoveWideningLower(Vector value) => MoveWideningLower(value); + + /// + /// svuint32_t svmovlb[_u32](svuint16_t op) + /// + public static unsafe 
Vector MoveWideningLower(Vector value) => MoveWideningLower(value); + + /// + /// svuint64_t svmovlb[_u64](svuint32_t op) + /// + public static unsafe Vector MoveWideningLower(Vector value) => MoveWideningLower(value); + + + /// MoveWideningUpper : Move long (top) + + /// + /// svint16_t svmovlt[_s16](svint8_t op) + /// + public static unsafe Vector MoveWideningUpper(Vector value) => MoveWideningUpper(value); + + /// + /// svint32_t svmovlt[_s32](svint16_t op) + /// + public static unsafe Vector MoveWideningUpper(Vector value) => MoveWideningUpper(value); + + /// + /// svint64_t svmovlt[_s64](svint32_t op) + /// + public static unsafe Vector MoveWideningUpper(Vector value) => MoveWideningUpper(value); + + /// + /// svuint16_t svmovlt[_u16](svuint8_t op) + /// + public static unsafe Vector MoveWideningUpper(Vector value) => MoveWideningUpper(value); + + /// + /// svuint32_t svmovlt[_u32](svuint16_t op) + /// + public static unsafe Vector MoveWideningUpper(Vector value) => MoveWideningUpper(value); + + /// + /// svuint64_t svmovlt[_u64](svuint32_t op) + /// + public static unsafe Vector MoveWideningUpper(Vector value) => MoveWideningUpper(value); + + + /// MultiplyAddBySelectedScalar : Multiply-add, addend first + + /// + /// svint16_t svmla_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svint32_t svmla_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svint64_t svmla_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svuint16_t svmla_lane[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svuint32_t svmla_lane[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svuint64_t svmla_lane[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + + /// MultiplyAddRotateComplex : Complex multiply-add with rotate + + /// + /// svint8_t svcmla[_s8](svint8_t op1, svint8_t op2, svint8_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svint16_t svcmla[_s16](svint16_t op1, svint16_t op2, 
svint16_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svint32_t svcmla[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svint64_t svcmla[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svuint8_t svcmla[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svuint16_t svcmla[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svuint32_t svcmla[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svuint64_t svcmla[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + + /// MultiplyAddRotateComplexBySelectedScalar : Complex multiply-add with rotate + + /// + /// svint16_t svcmla_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) => MultiplyAddRotateComplexBySelectedScalar(addend, left, right, rightIndex, rotation); + + /// + /// svint32_t svcmla_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) => MultiplyAddRotateComplexBySelectedScalar(addend, left, right, rightIndex, rotation); + + /// + /// svuint16_t svcmla_lane[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) => MultiplyAddRotateComplexBySelectedScalar(addend, left, right, rightIndex, rotation); + + /// + /// svuint32_t svcmla_lane[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector 
+
+
+ /// MultiplyAddWideningLower : Multiply-add long (bottom)
+
+ /// <summary>
+ /// svint16_t svmlalb[_s16](svint16_t op1, svint8_t op2, svint8_t op3)
+ /// </summary>
+ public static unsafe Vector<short> MultiplyAddWideningLower(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) => MultiplyAddWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svmlalb[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+ /// </summary>
+ public static unsafe Vector<int> MultiplyAddWideningLower(Vector<int> op1, Vector<short> op2, Vector<short> op3) => MultiplyAddWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svmlalb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> MultiplyAddWideningLower(Vector<int> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) => MultiplyAddWideningLower(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svint64_t svmlalb[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+ /// </summary>
+ public static unsafe Vector<long> MultiplyAddWideningLower(Vector<long> op1, Vector<int> op2, Vector<int> op3) => MultiplyAddWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint64_t svmlalb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> MultiplyAddWideningLower(Vector<long> op1, Vector<int> op2, Vector<int> op3, ulong imm_index) => MultiplyAddWideningLower(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svuint16_t svmlalb[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3)
+ /// </summary>
+ public static unsafe Vector<ushort> MultiplyAddWideningLower(Vector<ushort> op1, Vector<byte> op2, Vector<byte> op3) => MultiplyAddWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svuint32_t svmlalb[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplyAddWideningLower(Vector<uint> op1, Vector<ushort> op2, Vector<ushort> op3) => MultiplyAddWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svuint32_t svmlalb_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplyAddWideningLower(Vector<uint> op1, Vector<ushort> op2, Vector<ushort> op3, ulong imm_index) => MultiplyAddWideningLower(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svuint64_t svmlalb[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplyAddWideningLower(Vector<ulong> op1, Vector<uint> op2, Vector<uint> op3) => MultiplyAddWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svuint64_t svmlalb_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplyAddWideningLower(Vector<ulong> op1, Vector<uint> op2, Vector<uint> op3, ulong imm_index) => MultiplyAddWideningLower(op1, op2, op3, imm_index);
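A scalar sketch of the svmlalb semantics wrapped by the group above: the even-numbered elements of op2 and op3 are widened, multiplied, and accumulated into op1. The helper below is illustrative only, not part of the API:

internal static class MultiplyAddWideningModel
{
    // svmlalb model: r[i] = op1[i] + (int)op2[2i] * (int)op3[2i].
    public static int[] Lower(int[] op1, short[] op2, short[] op3)
    {
        var r = new int[op1.Length];
        for (int i = 0; i < r.Length; i++)
            r[i] = op1[i] + op2[2 * i] * op3[2 * i];
        return r;
    }
}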
+
+
+ /// MultiplyAddWideningUpper : Multiply-add long (top)
+
+ /// <summary>
+ /// svint16_t svmlalt[_s16](svint16_t op1, svint8_t op2, svint8_t op3)
+ /// </summary>
+ public static unsafe Vector<short> MultiplyAddWideningUpper(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) => MultiplyAddWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svmlalt[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+ /// </summary>
+ public static unsafe Vector<int> MultiplyAddWideningUpper(Vector<int> op1, Vector<short> op2, Vector<short> op3) => MultiplyAddWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svmlalt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> MultiplyAddWideningUpper(Vector<int> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) => MultiplyAddWideningUpper(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svint64_t svmlalt[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+ /// </summary>
+ public static unsafe Vector<long> MultiplyAddWideningUpper(Vector<long> op1, Vector<int> op2, Vector<int> op3) => MultiplyAddWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint64_t svmlalt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> MultiplyAddWideningUpper(Vector<long> op1, Vector<int> op2, Vector<int> op3, ulong imm_index) => MultiplyAddWideningUpper(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svuint16_t svmlalt[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3)
+ /// </summary>
+ public static unsafe Vector<ushort> MultiplyAddWideningUpper(Vector<ushort> op1, Vector<byte> op2, Vector<byte> op3) => MultiplyAddWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svuint32_t svmlalt[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplyAddWideningUpper(Vector<uint> op1, Vector<ushort> op2, Vector<ushort> op3) => MultiplyAddWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svuint32_t svmlalt_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplyAddWideningUpper(Vector<uint> op1, Vector<ushort> op2, Vector<ushort> op3, ulong imm_index) => MultiplyAddWideningUpper(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svuint64_t svmlalt[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplyAddWideningUpper(Vector<ulong> op1, Vector<uint> op2, Vector<uint> op3) => MultiplyAddWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svuint64_t svmlalt_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplyAddWideningUpper(Vector<ulong> op1, Vector<uint> op2, Vector<uint> op3, ulong imm_index) => MultiplyAddWideningUpper(op1, op2, op3, imm_index);
+
+
+ /// MultiplyBySelectedScalar : Multiply
+
+ /// <summary>
+ /// svint16_t svmul_lane[_s16](svint16_t op1, svint16_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<short> MultiplyBySelectedScalar(Vector<short> left, Vector<short> right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);
+
+ /// <summary>
+ /// svint32_t svmul_lane[_s32](svint32_t op1, svint32_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> MultiplyBySelectedScalar(Vector<int> left, Vector<int> right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);
+
+ /// <summary>
+ /// svint64_t svmul_lane[_s64](svint64_t op1, svint64_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> MultiplyBySelectedScalar(Vector<long> left, Vector<long> right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);
+
+ /// <summary>
+ /// svuint16_t svmul_lane[_u16](svuint16_t op1, svuint16_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<ushort> MultiplyBySelectedScalar(Vector<ushort> left, Vector<ushort> right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);
+
+ /// <summary>
+ /// svuint32_t svmul_lane[_u32](svuint32_t op1, svuint32_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplyBySelectedScalar(Vector<uint> left, Vector<uint> right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);
+
+ /// <summary>
+ /// svuint64_t svmul_lane[_u64](svuint64_t op1, svuint64_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplyBySelectedScalar(Vector<ulong> left, Vector<ulong> right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);
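For the _lane forms, rightIndex selects a single element of right to act as the scalar multiplier; in the underlying instruction the index is applied within each 128-bit segment of the vector. The sketch below models a single segment and is illustrative only:

internal static class MultiplyByLaneModel
{
    // svmul_lane model for one 128-bit segment: every element of left is
    // multiplied by the one element of right chosen by rightIndex.
    public static short[] Multiply(short[] left, short[] right, byte rightIndex)
    {
        var r = new short[left.Length];
        for (int i = 0; i < r.Length; i++)
            r[i] = (short)(left[i] * right[rightIndex]);
        return r;
    }
}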
+
+
+ /// MultiplySubtractBySelectedScalar : Multiply-subtract, minuend first
+
+ /// <summary>
+ /// svint16_t svmls_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<short> MultiplySubtractBySelectedScalar(Vector<short> minuend, Vector<short> left, Vector<short> right, [ConstantExpected] byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
+
+ /// <summary>
+ /// svint32_t svmls_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> MultiplySubtractBySelectedScalar(Vector<int> minuend, Vector<int> left, Vector<int> right, [ConstantExpected] byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
+
+ /// <summary>
+ /// svint64_t svmls_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> MultiplySubtractBySelectedScalar(Vector<long> minuend, Vector<long> left, Vector<long> right, [ConstantExpected] byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
+
+ /// <summary>
+ /// svuint16_t svmls_lane[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<ushort> MultiplySubtractBySelectedScalar(Vector<ushort> minuend, Vector<ushort> left, Vector<ushort> right, [ConstantExpected] byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
+
+ /// <summary>
+ /// svuint32_t svmls_lane[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplySubtractBySelectedScalar(Vector<uint> minuend, Vector<uint> left, Vector<uint> right, [ConstantExpected] byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
+
+ /// <summary>
+ /// svuint64_t svmls_lane[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplySubtractBySelectedScalar(Vector<ulong> minuend, Vector<ulong> left, Vector<ulong> right, [ConstantExpected] byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
+
+
+ /// MultiplySubtractWideningLower : Multiply-subtract long (bottom)
+
+ /// <summary>
+ /// svint16_t svmlslb[_s16](svint16_t op1, svint8_t op2, svint8_t op3)
+ /// </summary>
+ public static unsafe Vector<short> MultiplySubtractWideningLower(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) => MultiplySubtractWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svmlslb[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+ /// </summary>
+ public static unsafe Vector<int> MultiplySubtractWideningLower(Vector<int> op1, Vector<short> op2, Vector<short> op3) => MultiplySubtractWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svmlslb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> MultiplySubtractWideningLower(Vector<int> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) => MultiplySubtractWideningLower(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svint64_t svmlslb[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+ /// </summary>
+ public static unsafe Vector<long> MultiplySubtractWideningLower(Vector<long> op1, Vector<int> op2, Vector<int> op3) => MultiplySubtractWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint64_t svmlslb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> MultiplySubtractWideningLower(Vector<long> op1, Vector<int> op2, Vector<int> op3, ulong imm_index) => MultiplySubtractWideningLower(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svuint16_t svmlslb[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3)
+ /// </summary>
+ public static unsafe Vector<ushort> MultiplySubtractWideningLower(Vector<ushort> op1, Vector<byte> op2, Vector<byte> op3) => MultiplySubtractWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svuint32_t svmlslb[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplySubtractWideningLower(Vector<uint> op1, Vector<ushort> op2, Vector<ushort> op3) => MultiplySubtractWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svuint32_t svmlslb_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplySubtractWideningLower(Vector<uint> op1, Vector<ushort> op2, Vector<ushort> op3, ulong imm_index) => MultiplySubtractWideningLower(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svuint64_t svmlslb[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplySubtractWideningLower(Vector<ulong> op1, Vector<uint> op2, Vector<uint> op3) => MultiplySubtractWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svuint64_t svmlslb_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplySubtractWideningLower(Vector<ulong> op1, Vector<uint> op2, Vector<uint> op3, ulong imm_index) => MultiplySubtractWideningLower(op1, op2, op3, imm_index);
+
+
+ /// MultiplySubtractWideningUpper : Multiply-subtract long (top)
+
+ /// <summary>
+ /// svint16_t svmlslt[_s16](svint16_t op1, svint8_t op2, svint8_t op3)
+ /// </summary>
+ public static unsafe Vector<short> MultiplySubtractWideningUpper(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) => MultiplySubtractWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svmlslt[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+ /// </summary>
+ public static unsafe Vector<int> MultiplySubtractWideningUpper(Vector<int> op1, Vector<short> op2, Vector<short> op3) => MultiplySubtractWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svmlslt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> MultiplySubtractWideningUpper(Vector<int> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) => MultiplySubtractWideningUpper(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svint64_t svmlslt[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+ /// </summary>
+ public static unsafe Vector<long> MultiplySubtractWideningUpper(Vector<long> op1, Vector<int> op2, Vector<int> op3) => MultiplySubtractWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint64_t svmlslt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> MultiplySubtractWideningUpper(Vector<long> op1, Vector<int> op2, Vector<int> op3, ulong imm_index) => MultiplySubtractWideningUpper(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svuint16_t svmlslt[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3)
+ /// </summary>
+ public static unsafe Vector<ushort> MultiplySubtractWideningUpper(Vector<ushort> op1, Vector<byte> op2, Vector<byte> op3) => MultiplySubtractWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svuint32_t svmlslt[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplySubtractWideningUpper(Vector<uint> op1, Vector<ushort> op2, Vector<ushort> op3) => MultiplySubtractWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svuint32_t svmlslt_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplySubtractWideningUpper(Vector<uint> op1, Vector<ushort> op2, Vector<ushort> op3, ulong imm_index) => MultiplySubtractWideningUpper(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svuint64_t svmlslt[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplySubtractWideningUpper(Vector<ulong> op1, Vector<uint> op2, Vector<uint> op3) => MultiplySubtractWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svuint64_t svmlslt_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplySubtractWideningUpper(Vector<ulong> op1, Vector<uint> op2, Vector<uint> op3, ulong imm_index) => MultiplySubtractWideningUpper(op1, op2, op3, imm_index);
+
+
+ /// MultiplyWideningLower : Multiply long (bottom)
+
+ /// <summary>
+ /// svint16_t svmullb[_s16](svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<short> MultiplyWideningLower(Vector<sbyte> left, Vector<sbyte> right) => MultiplyWideningLower(left, right);
+
+ /// <summary>
+ /// svint32_t svmullb[_s32](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<int> MultiplyWideningLower(Vector<short> left, Vector<short> right) => MultiplyWideningLower(left, right);
+
+ /// <summary>
+ /// svint32_t svmullb_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> MultiplyWideningLower(Vector<short> op1, Vector<short> op2, ulong imm_index) => MultiplyWideningLower(op1, op2, imm_index);
+
+ /// <summary>
+ /// svint64_t svmullb[_s64](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<long> MultiplyWideningLower(Vector<int> left, Vector<int> right) => MultiplyWideningLower(left, right);
+
+ /// <summary>
+ /// svint64_t svmullb_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> MultiplyWideningLower(Vector<int> op1, Vector<int> op2, ulong imm_index) => MultiplyWideningLower(op1, op2, imm_index);
+
+ /// <summary>
+ /// svuint16_t svmullb[_u16](svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> MultiplyWideningLower(Vector<byte> left, Vector<byte> right) => MultiplyWideningLower(left, right);
+
+ /// <summary>
+ /// svuint32_t svmullb[_u32](svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplyWideningLower(Vector<ushort> left, Vector<ushort> right) => MultiplyWideningLower(left, right);
+
+ /// <summary>
+ /// svuint32_t svmullb_lane[_u32](svuint16_t op1, svuint16_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplyWideningLower(Vector<ushort> op1, Vector<ushort> op2, ulong imm_index) => MultiplyWideningLower(op1, op2, imm_index);
+
+ /// <summary>
+ /// svuint64_t svmullb[_u64](svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplyWideningLower(Vector<uint> left, Vector<uint> right) => MultiplyWideningLower(left, right);
+
+ /// <summary>
+ /// svuint64_t svmullb_lane[_u64](svuint32_t op1, svuint32_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplyWideningLower(Vector<uint> op1, Vector<uint> op2, ulong imm_index) => MultiplyWideningLower(op1, op2, imm_index);
+
+
+ /// MultiplyWideningUpper : Multiply long (top)
+
+ /// <summary>
+ /// svint16_t svmullt[_s16](svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<short> MultiplyWideningUpper(Vector<sbyte> left, Vector<sbyte> right) => MultiplyWideningUpper(left, right);
+
+ /// <summary>
+ /// svint32_t svmullt[_s32](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<int> MultiplyWideningUpper(Vector<short> left, Vector<short> right) => MultiplyWideningUpper(left, right);
+
+ /// <summary>
+ /// svint32_t svmullt_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> MultiplyWideningUpper(Vector<short> op1, Vector<short> op2, ulong imm_index) => MultiplyWideningUpper(op1, op2, imm_index);
+
+ /// <summary>
+ /// svint64_t svmullt[_s64](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<long> MultiplyWideningUpper(Vector<int> left, Vector<int> right) => MultiplyWideningUpper(left, right);
+
+ /// <summary>
+ /// svint64_t svmullt_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> MultiplyWideningUpper(Vector<int> op1, Vector<int> op2, ulong imm_index) => MultiplyWideningUpper(op1, op2, imm_index);
+
+ /// <summary>
+ /// svuint16_t svmullt[_u16](svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> MultiplyWideningUpper(Vector<byte> left, Vector<byte> right) => MultiplyWideningUpper(left, right);
+
+ /// <summary>
+ /// svuint32_t svmullt[_u32](svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplyWideningUpper(Vector<ushort> left, Vector<ushort> right) => MultiplyWideningUpper(left, right);
+
+ /// <summary>
+ /// svuint32_t svmullt_lane[_u32](svuint16_t op1, svuint16_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<uint> MultiplyWideningUpper(Vector<ushort> op1, Vector<ushort> op2, ulong imm_index) => MultiplyWideningUpper(op1, op2, imm_index);
+
+ /// <summary>
+ /// svuint64_t svmullt[_u64](svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplyWideningUpper(Vector<uint> left, Vector<uint> right) => MultiplyWideningUpper(left, right);
+
+ /// <summary>
+ /// svuint64_t svmullt_lane[_u64](svuint32_t op1, svuint32_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplyWideningUpper(Vector<uint> op1, Vector<uint> op2, ulong imm_index) => MultiplyWideningUpper(op1, op2, imm_index);
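svmullb/svmullt pair up even- or odd-numbered elements and produce double-width products. A scalar model, with illustrative helpers that are not part of the API:

internal static class MultiplyWideningModel
{
    // Bottom: products of even-numbered pairs, at double width.
    public static int[] Lower(short[] a, short[] b)
    {
        var r = new int[a.Length / 2];
        for (int i = 0; i < r.Length; i++) r[i] = a[2 * i] * b[2 * i];
        return r;
    }

    // Top: products of odd-numbered pairs.
    public static int[] Upper(short[] a, short[] b)
    {
        var r = new int[a.Length / 2];
        for (int i = 0; i < r.Length; i++) r[i] = a[2 * i + 1] * b[2 * i + 1];
        return r;
    }
}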
+
+
+ /// NoMatch : Detect no matching elements
+
+ /// <summary>
+ /// svbool_t svnmatch[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> NoMatch(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right) => NoMatch(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svnmatch[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> NoMatch(Vector<short> mask, Vector<short> left, Vector<short> right) => NoMatch(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svnmatch[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> NoMatch(Vector<byte> mask, Vector<byte> left, Vector<byte> right) => NoMatch(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svnmatch[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> NoMatch(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right) => NoMatch(mask, left, right);
+
+
+ /// PolynomialMultiply : Polynomial multiply
+
+ /// <summary>
+ /// svuint8_t svpmul[_u8](svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> PolynomialMultiply(Vector<byte> left, Vector<byte> right) => PolynomialMultiply(left, right);
+
+
+ /// PolynomialMultiplyWideningLower : Polynomial multiply long (bottom)
+
+ /// <summary>
+ /// svuint8_t svpmullb_pair[_u8](svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> PolynomialMultiplyWideningLower(Vector<byte> left, Vector<byte> right) => PolynomialMultiplyWideningLower(left, right);
+
+ /// <summary>
+ /// svuint16_t svpmullb[_u16](svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> PolynomialMultiplyWideningLower(Vector<byte> left, Vector<byte> right) => PolynomialMultiplyWideningLower(left, right);
+
+ /// <summary>
+ /// svuint32_t svpmullb_pair[_u32](svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> PolynomialMultiplyWideningLower(Vector<uint> left, Vector<uint> right) => PolynomialMultiplyWideningLower(left, right);
+
+ /// <summary>
+ /// svuint64_t svpmullb[_u64](svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> PolynomialMultiplyWideningLower(Vector<uint> left, Vector<uint> right) => PolynomialMultiplyWideningLower(left, right);
+
+
+ /// PolynomialMultiplyWideningUpper : Polynomial multiply long (top)
+
+ /// <summary>
+ /// svuint8_t svpmullt_pair[_u8](svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> PolynomialMultiplyWideningUpper(Vector<byte> left, Vector<byte> right) => PolynomialMultiplyWideningUpper(left, right);
+
+ /// <summary>
+ /// svuint16_t svpmullt[_u16](svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> PolynomialMultiplyWideningUpper(Vector<byte> left, Vector<byte> right) => PolynomialMultiplyWideningUpper(left, right);
+
+ /// <summary>
+ /// svuint32_t svpmullt_pair[_u32](svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> PolynomialMultiplyWideningUpper(Vector<uint> left, Vector<uint> right) => PolynomialMultiplyWideningUpper(left, right);
+
+ /// <summary>
+ /// svuint64_t svpmullt[_u64](svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> PolynomialMultiplyWideningUpper(Vector<uint> left, Vector<uint> right) => PolynomialMultiplyWideningUpper(left, right);
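svpmul is a carry-less multiply over GF(2): partial products are combined with XOR instead of addition, which is the primitive behind CRCs and GHASH. A scalar model of one byte pair, illustrative only:

internal static class PolynomialMultiplyModel
{
    // Non-widening form: keep only the low 8 bits of the 15-bit product.
    public static byte Multiply(byte a, byte b)
    {
        int acc = 0;
        for (int bit = 0; bit < 8; bit++)
            if (((b >> bit) & 1) != 0)
                acc ^= a << bit; // XOR instead of add: no carries
        return (byte)acc;        // the widening forms keep all bits instead
    }
}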
+
+
+ /// ReciprocalEstimate : Reciprocal estimate
+
+ /// <summary>
+ /// svuint32_t svrecpe[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ /// svuint32_t svrecpe[_u32]_x(svbool_t pg, svuint32_t op)
+ /// svuint32_t svrecpe[_u32]_z(svbool_t pg, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> ReciprocalEstimate(Vector<uint> value) => ReciprocalEstimate(value);
+
+
+ /// ReciprocalSqrtEstimate : Reciprocal square root estimate
+
+ /// <summary>
+ /// svuint32_t svrsqrte[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op)
+ /// svuint32_t svrsqrte[_u32]_x(svbool_t pg, svuint32_t op)
+ /// svuint32_t svrsqrte[_u32]_z(svbool_t pg, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<uint> ReciprocalSqrtEstimate(Vector<uint> value) => ReciprocalSqrtEstimate(value);
+
+
+ /// RoundingAddHighNarowingLower : Rounding add narrow high part (bottom)
+
+ /// <summary>
+ /// svint8_t svraddhnb[_s16](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> RoundingAddHighNarowingLower(Vector<short> left, Vector<short> right) => RoundingAddHighNarowingLower(left, right);
+
+ /// <summary>
+ /// svint16_t svraddhnb[_s32](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<short> RoundingAddHighNarowingLower(Vector<int> left, Vector<int> right) => RoundingAddHighNarowingLower(left, right);
+
+ /// <summary>
+ /// svint32_t svraddhnb[_s64](svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<int> RoundingAddHighNarowingLower(Vector<long> left, Vector<long> right) => RoundingAddHighNarowingLower(left, right);
+
+ /// <summary>
+ /// svuint8_t svraddhnb[_u16](svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> RoundingAddHighNarowingLower(Vector<ushort> left, Vector<ushort> right) => RoundingAddHighNarowingLower(left, right);
+
+ /// <summary>
+ /// svuint16_t svraddhnb[_u32](svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> RoundingAddHighNarowingLower(Vector<uint> left, Vector<uint> right) => RoundingAddHighNarowingLower(left, right);
+
+ /// <summary>
+ /// svuint32_t svraddhnb[_u64](svuint64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> RoundingAddHighNarowingLower(Vector<ulong> left, Vector<ulong> right) => RoundingAddHighNarowingLower(left, right);
+
+
+ /// RoundingAddHighNarowingUpper : Rounding add narrow high part (top)
+
+ /// <summary>
+ /// svint8_t svraddhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> RoundingAddHighNarowingUpper(Vector<sbyte> even, Vector<short> left, Vector<short> right) => RoundingAddHighNarowingUpper(even, left, right);
+
+ /// <summary>
+ /// svint16_t svraddhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<short> RoundingAddHighNarowingUpper(Vector<short> even, Vector<int> left, Vector<int> right) => RoundingAddHighNarowingUpper(even, left, right);
+
+ /// <summary>
+ /// svint32_t svraddhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<int> RoundingAddHighNarowingUpper(Vector<int> even, Vector<long> left, Vector<long> right) => RoundingAddHighNarowingUpper(even, left, right);
+
+ /// <summary>
+ /// svuint8_t svraddhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> RoundingAddHighNarowingUpper(Vector<byte> even, Vector<ushort> left, Vector<ushort> right) => RoundingAddHighNarowingUpper(even, left, right);
+
+ /// <summary>
+ /// svuint16_t svraddhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> RoundingAddHighNarowingUpper(Vector<ushort> even, Vector<uint> left, Vector<uint> right) => RoundingAddHighNarowingUpper(even, left, right);
+
+ /// <summary>
+ /// svuint32_t svraddhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> RoundingAddHighNarowingUpper(Vector<uint> even, Vector<ulong> left, Vector<ulong> right) => RoundingAddHighNarowingUpper(even, left, right);
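svraddhnb adds at full width, rounds, and keeps the high half of each sum; the Upper form interleaves its results into the odd lanes alongside even. A scalar model of the 16-to-8-bit case, illustrative only:

internal static class RoundingAddHighNarrowModel
{
    // (a + b + 0x80) >> 8, computed in int so the sum cannot wrap.
    public static sbyte Narrow(short a, short b)
        => (sbyte)((a + b + (1 << 7)) >> 8);
}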
+
+
+ /// RoundingHalvingAdd : Rounding halving add
+
+ /// <summary>
+ /// svint8_t svrhadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svrhadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// svint8_t svrhadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> RoundingHalvingAdd(Vector<sbyte> left, Vector<sbyte> right) => RoundingHalvingAdd(left, right);
+
+ /// <summary>
+ /// svint16_t svrhadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svrhadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// svint16_t svrhadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> RoundingHalvingAdd(Vector<short> left, Vector<short> right) => RoundingHalvingAdd(left, right);
+
+ /// <summary>
+ /// svint32_t svrhadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svrhadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// svint32_t svrhadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> RoundingHalvingAdd(Vector<int> left, Vector<int> right) => RoundingHalvingAdd(left, right);
+
+ /// <summary>
+ /// svint64_t svrhadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svrhadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// svint64_t svrhadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> RoundingHalvingAdd(Vector<long> left, Vector<long> right) => RoundingHalvingAdd(left, right);
+
+ /// <summary>
+ /// svuint8_t svrhadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svrhadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// svuint8_t svrhadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> RoundingHalvingAdd(Vector<byte> left, Vector<byte> right) => RoundingHalvingAdd(left, right);
+
+ /// <summary>
+ /// svuint16_t svrhadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svrhadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// svuint16_t svrhadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> RoundingHalvingAdd(Vector<ushort> left, Vector<ushort> right) => RoundingHalvingAdd(left, right);
+
+ /// <summary>
+ /// svuint32_t svrhadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svrhadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// svuint32_t svrhadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> RoundingHalvingAdd(Vector<uint> left, Vector<uint> right) => RoundingHalvingAdd(left, right);
+
+ /// <summary>
+ /// svuint64_t svrhadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svrhadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// svuint64_t svrhadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<ulong> RoundingHalvingAdd(Vector<ulong> left, Vector<ulong> right) => RoundingHalvingAdd(left, right);
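svrhadd computes the average of each element pair rounded up, without intermediate overflow. A scalar model, illustrative only:

internal static class RoundingHalvingAddModel
{
    // (a + b + 1) >> 1 at a wider width, so a + b cannot wrap.
    public static int Add(int a, int b) => (int)(((long)a + b + 1) >> 1);
}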
+
+
+ /// RoundingSubtractHighNarowingLower : Rounding subtract narrow high part (bottom)
+
+ /// <summary>
+ /// svint8_t svrsubhnb[_s16](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> RoundingSubtractHighNarowingLower(Vector<short> left, Vector<short> right) => RoundingSubtractHighNarowingLower(left, right);
+
+ /// <summary>
+ /// svint16_t svrsubhnb[_s32](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<short> RoundingSubtractHighNarowingLower(Vector<int> left, Vector<int> right) => RoundingSubtractHighNarowingLower(left, right);
+
+ /// <summary>
+ /// svint32_t svrsubhnb[_s64](svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<int> RoundingSubtractHighNarowingLower(Vector<long> left, Vector<long> right) => RoundingSubtractHighNarowingLower(left, right);
+
+ /// <summary>
+ /// svuint8_t svrsubhnb[_u16](svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> RoundingSubtractHighNarowingLower(Vector<ushort> left, Vector<ushort> right) => RoundingSubtractHighNarowingLower(left, right);
+
+ /// <summary>
+ /// svuint16_t svrsubhnb[_u32](svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> RoundingSubtractHighNarowingLower(Vector<uint> left, Vector<uint> right) => RoundingSubtractHighNarowingLower(left, right);
+
+ /// <summary>
+ /// svuint32_t svrsubhnb[_u64](svuint64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> RoundingSubtractHighNarowingLower(Vector<ulong> left, Vector<ulong> right) => RoundingSubtractHighNarowingLower(left, right);
+
+
+ /// RoundingSubtractHighNarowingUpper : Rounding subtract narrow high part (top)
+
+ /// <summary>
+ /// svint8_t svrsubhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> RoundingSubtractHighNarowingUpper(Vector<sbyte> even, Vector<short> left, Vector<short> right) => RoundingSubtractHighNarowingUpper(even, left, right);
+
+ /// <summary>
+ /// svint16_t svrsubhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<short> RoundingSubtractHighNarowingUpper(Vector<short> even, Vector<int> left, Vector<int> right) => RoundingSubtractHighNarowingUpper(even, left, right);
+
+ /// <summary>
+ /// svint32_t svrsubhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<int> RoundingSubtractHighNarowingUpper(Vector<int> even, Vector<long> left, Vector<long> right) => RoundingSubtractHighNarowingUpper(even, left, right);
+
+ /// <summary>
+ /// svuint8_t svrsubhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2)
+ /// </summary>
+ public static unsafe Vector<byte> RoundingSubtractHighNarowingUpper(Vector<byte> even, Vector<ushort> left, Vector<ushort> right) => RoundingSubtractHighNarowingUpper(even, left, right);
+
+ /// <summary>
+ /// svuint16_t svrsubhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2)
+ /// </summary>
+ public static unsafe Vector<ushort> RoundingSubtractHighNarowingUpper(Vector<ushort> even, Vector<uint> left, Vector<uint> right) => RoundingSubtractHighNarowingUpper(even, left, right);
+
+ /// <summary>
+ /// svuint32_t svrsubhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2)
+ /// </summary>
+ public static unsafe Vector<uint> RoundingSubtractHighNarowingUpper(Vector<uint> even, Vector<ulong> left, Vector<ulong> right) => RoundingSubtractHighNarowingUpper(even, left, right);
+
+
+ /// SaturatingAbs : Saturating absolute value
+
+ /// <summary>
+ /// svint8_t svqabs[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op)
+ /// svint8_t svqabs[_s8]_x(svbool_t pg, svint8_t op)
+ /// svint8_t svqabs[_s8]_z(svbool_t pg, svint8_t op)
+ /// </summary>
+ public static unsafe Vector<sbyte> SaturatingAbs(Vector<sbyte> value) => SaturatingAbs(value);
+
+ /// <summary>
+ /// svint16_t svqabs[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ /// svint16_t svqabs[_s16]_x(svbool_t pg, svint16_t op)
+ /// svint16_t svqabs[_s16]_z(svbool_t pg, svint16_t op)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingAbs(Vector<short> value) => SaturatingAbs(value);
+
+ /// <summary>
+ /// svint32_t svqabs[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ /// svint32_t svqabs[_s32]_x(svbool_t pg, svint32_t op)
+ /// svint32_t svqabs[_s32]_z(svbool_t pg, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingAbs(Vector<int> value) => SaturatingAbs(value);
+
+ /// <summary>
+ /// svint64_t svqabs[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ /// svint64_t svqabs[_s64]_x(svbool_t pg, svint64_t op)
+ /// svint64_t svqabs[_s64]_z(svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingAbs(Vector<long> value) => SaturatingAbs(value);
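svqabs is ordinary absolute value except that MinValue, which has no positive counterpart in two's complement, saturates to MaxValue instead of overflowing. A scalar model, illustrative only:

internal static class SaturatingAbsModel
{
    public static int Abs(int v)
        => v == int.MinValue ? int.MaxValue : (v < 0 ? -v : v);
}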
+
+
+ /// SaturatingComplexAddRotate : Saturating complex add with rotate
+
+ /// <summary>
+ /// svint8_t svqcadd[_s8](svint8_t op1, svint8_t op2, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<sbyte> SaturatingComplexAddRotate(Vector<sbyte> op1, Vector<sbyte> op2, [ConstantExpected] byte rotation) => SaturatingComplexAddRotate(op1, op2, rotation);
+
+ /// <summary>
+ /// svint16_t svqcadd[_s16](svint16_t op1, svint16_t op2, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingComplexAddRotate(Vector<short> op1, Vector<short> op2, [ConstantExpected] byte rotation) => SaturatingComplexAddRotate(op1, op2, rotation);
+
+ /// <summary>
+ /// svint32_t svqcadd[_s32](svint32_t op1, svint32_t op2, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingComplexAddRotate(Vector<int> op1, Vector<int> op2, [ConstantExpected] byte rotation) => SaturatingComplexAddRotate(op1, op2, rotation);
+
+ /// <summary>
+ /// svint64_t svqcadd[_s64](svint64_t op1, svint64_t op2, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingComplexAddRotate(Vector<long> op1, Vector<long> op2, [ConstantExpected] byte rotation) => SaturatingComplexAddRotate(op1, op2, rotation);
+
+
+ /// SaturatingDoublingMultiplyAddWideningLower : Saturating doubling multiply-add long (bottom)
+
+ /// <summary>
+ /// svint16_t svqdmlalb[_s16](svint16_t op1, svint8_t op2, svint8_t op3)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingDoublingMultiplyAddWideningLower(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) => SaturatingDoublingMultiplyAddWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svqdmlalb[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplyAddWideningLower(Vector<int> op1, Vector<short> op2, Vector<short> op3) => SaturatingDoublingMultiplyAddWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svqdmlalb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplyAddWideningLower(Vector<int> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) => SaturatingDoublingMultiplyAddWideningLower(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svint64_t svqdmlalb[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplyAddWideningLower(Vector<long> op1, Vector<int> op2, Vector<int> op3) => SaturatingDoublingMultiplyAddWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint64_t svqdmlalb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplyAddWideningLower(Vector<long> op1, Vector<int> op2, Vector<int> op3, ulong imm_index) => SaturatingDoublingMultiplyAddWideningLower(op1, op2, op3, imm_index);
+
+
+ /// SaturatingDoublingMultiplyAddWideningLowerUpper : Saturating doubling multiply-add long (bottom × top)
+
+ /// <summary>
+ /// svint16_t svqdmlalbt[_s16](svint16_t op1, svint8_t op2, svint8_t op3)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingDoublingMultiplyAddWideningLowerUpper(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) => SaturatingDoublingMultiplyAddWideningLowerUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svqdmlalbt[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplyAddWideningLowerUpper(Vector<int> op1, Vector<short> op2, Vector<short> op3) => SaturatingDoublingMultiplyAddWideningLowerUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint64_t svqdmlalbt[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplyAddWideningLowerUpper(Vector<long> op1, Vector<int> op2, Vector<int> op3) => SaturatingDoublingMultiplyAddWideningLowerUpper(op1, op2, op3);
+
+
+ /// SaturatingDoublingMultiplyAddWideningUpper : Saturating doubling multiply-add long (top)
+
+ /// <summary>
+ /// svint16_t svqdmlalt[_s16](svint16_t op1, svint8_t op2, svint8_t op3)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingDoublingMultiplyAddWideningUpper(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) => SaturatingDoublingMultiplyAddWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svqdmlalt[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplyAddWideningUpper(Vector<int> op1, Vector<short> op2, Vector<short> op3) => SaturatingDoublingMultiplyAddWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svqdmlalt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplyAddWideningUpper(Vector<int> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) => SaturatingDoublingMultiplyAddWideningUpper(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svint64_t svqdmlalt[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplyAddWideningUpper(Vector<long> op1, Vector<int> op2, Vector<int> op3) => SaturatingDoublingMultiplyAddWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint64_t svqdmlalt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplyAddWideningUpper(Vector<long> op1, Vector<int> op2, Vector<int> op3, ulong imm_index) => SaturatingDoublingMultiplyAddWideningUpper(op1, op2, op3, imm_index);
+
+
+ /// SaturatingDoublingMultiplyHigh : Saturating doubling multiply high
+
+ /// <summary>
+ /// svint8_t svqdmulh[_s8](svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> SaturatingDoublingMultiplyHigh(Vector<sbyte> left, Vector<sbyte> right) => SaturatingDoublingMultiplyHigh(left, right);
+
+ /// <summary>
+ /// svint16_t svqdmulh[_s16](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingDoublingMultiplyHigh(Vector<short> left, Vector<short> right) => SaturatingDoublingMultiplyHigh(left, right);
+
+ /// <summary>
+ /// svint16_t svqdmulh_lane[_s16](svint16_t op1, svint16_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingDoublingMultiplyHigh(Vector<short> op1, Vector<short> op2, ulong imm_index) => SaturatingDoublingMultiplyHigh(op1, op2, imm_index);
+
+ /// <summary>
+ /// svint32_t svqdmulh[_s32](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplyHigh(Vector<int> left, Vector<int> right) => SaturatingDoublingMultiplyHigh(left, right);
+
+ /// <summary>
+ /// svint32_t svqdmulh_lane[_s32](svint32_t op1, svint32_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplyHigh(Vector<int> op1, Vector<int> op2, ulong imm_index) => SaturatingDoublingMultiplyHigh(op1, op2, imm_index);
+
+ /// <summary>
+ /// svint64_t svqdmulh[_s64](svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplyHigh(Vector<long> left, Vector<long> right) => SaturatingDoublingMultiplyHigh(left, right);
+
+ /// <summary>
+ /// svint64_t svqdmulh_lane[_s64](svint64_t op1, svint64_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplyHigh(Vector<long> op1, Vector<long> op2, ulong imm_index) => SaturatingDoublingMultiplyHigh(op1, op2, imm_index);
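svqdmulh doubles the product, keeps the high half, and saturates; the only input pair that can actually saturate is MinValue × MinValue. A scalar model for 16-bit elements, illustrative only:

internal static class SaturatingDoublingMultiplyHighModel
{
    public static short Multiply(short a, short b)
    {
        long v = (2L * a * b) >> 16;                     // high half of doubled product
        return v > short.MaxValue ? short.MaxValue : (short)v;
    }
}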
+
+
+ /// SaturatingDoublingMultiplySubtractWideningLower : Saturating doubling multiply-subtract long (bottom)
+
+ /// <summary>
+ /// svint16_t svqdmlslb[_s16](svint16_t op1, svint8_t op2, svint8_t op3)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingDoublingMultiplySubtractWideningLower(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) => SaturatingDoublingMultiplySubtractWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svqdmlslb[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplySubtractWideningLower(Vector<int> op1, Vector<short> op2, Vector<short> op3) => SaturatingDoublingMultiplySubtractWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svqdmlslb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplySubtractWideningLower(Vector<int> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) => SaturatingDoublingMultiplySubtractWideningLower(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svint64_t svqdmlslb[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplySubtractWideningLower(Vector<long> op1, Vector<int> op2, Vector<int> op3) => SaturatingDoublingMultiplySubtractWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint64_t svqdmlslb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplySubtractWideningLower(Vector<long> op1, Vector<int> op2, Vector<int> op3, ulong imm_index) => SaturatingDoublingMultiplySubtractWideningLower(op1, op2, op3, imm_index);
+
+
+ /// SaturatingDoublingMultiplySubtractWideningLowerUpper : Saturating doubling multiply-subtract long (bottom × top)
+
+ /// <summary>
+ /// svint16_t svqdmlslbt[_s16](svint16_t op1, svint8_t op2, svint8_t op3)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) => SaturatingDoublingMultiplySubtractWideningLowerUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svqdmlslbt[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector<int> op1, Vector<short> op2, Vector<short> op3) => SaturatingDoublingMultiplySubtractWideningLowerUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint64_t svqdmlslbt[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector<long> op1, Vector<int> op2, Vector<int> op3) => SaturatingDoublingMultiplySubtractWideningLowerUpper(op1, op2, op3);
+
+
+ /// SaturatingDoublingMultiplySubtractWideningUpper : Saturating doubling multiply-subtract long (top)
+
+ /// <summary>
+ /// svint16_t svqdmlslt[_s16](svint16_t op1, svint8_t op2, svint8_t op3)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingDoublingMultiplySubtractWideningUpper(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) => SaturatingDoublingMultiplySubtractWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svqdmlslt[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplySubtractWideningUpper(Vector<int> op1, Vector<short> op2, Vector<short> op3) => SaturatingDoublingMultiplySubtractWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svqdmlslt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplySubtractWideningUpper(Vector<int> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) => SaturatingDoublingMultiplySubtractWideningUpper(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svint64_t svqdmlslt[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplySubtractWideningUpper(Vector<long> op1, Vector<int> op2, Vector<int> op3) => SaturatingDoublingMultiplySubtractWideningUpper(op1, op2, op3);
+
+ /// <summary>
+ /// svint64_t svqdmlslt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplySubtractWideningUpper(Vector<long> op1, Vector<int> op2, Vector<int> op3, ulong imm_index) => SaturatingDoublingMultiplySubtractWideningUpper(op1, op2, op3, imm_index);
+
+
+ /// SaturatingDoublingMultiplyWideningLower : Saturating doubling multiply long (bottom)
+
+ /// <summary>
+ /// svint16_t svqdmullb[_s16](svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingDoublingMultiplyWideningLower(Vector<sbyte> left, Vector<sbyte> right) => SaturatingDoublingMultiplyWideningLower(left, right);
+
+ /// <summary>
+ /// svint32_t svqdmullb[_s32](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplyWideningLower(Vector<short> left, Vector<short> right) => SaturatingDoublingMultiplyWideningLower(left, right);
+
+ /// <summary>
+ /// svint32_t svqdmullb_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplyWideningLower(Vector<short> op1, Vector<short> op2, ulong imm_index) => SaturatingDoublingMultiplyWideningLower(op1, op2, imm_index);
+
+ /// <summary>
+ /// svint64_t svqdmullb[_s64](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplyWideningLower(Vector<int> left, Vector<int> right) => SaturatingDoublingMultiplyWideningLower(left, right);
+
+ /// <summary>
+ /// svint64_t svqdmullb_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplyWideningLower(Vector<int> op1, Vector<int> op2, ulong imm_index) => SaturatingDoublingMultiplyWideningLower(op1, op2, imm_index);
+
+
+ /// SaturatingDoublingMultiplyWideningUpper : Saturating doubling multiply long (top)
+
+ /// <summary>
+ /// svint16_t svqdmullt[_s16](svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingDoublingMultiplyWideningUpper(Vector<sbyte> left, Vector<sbyte> right) => SaturatingDoublingMultiplyWideningUpper(left, right);
+
+ /// <summary>
+ /// svint32_t svqdmullt[_s32](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplyWideningUpper(Vector<short> left, Vector<short> right) => SaturatingDoublingMultiplyWideningUpper(left, right);
+
+ /// <summary>
+ /// svint32_t svqdmullt_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingDoublingMultiplyWideningUpper(Vector<short> op1, Vector<short> op2, ulong imm_index) => SaturatingDoublingMultiplyWideningUpper(op1, op2, imm_index);
+
+ /// <summary>
+ /// svint64_t svqdmullt[_s64](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplyWideningUpper(Vector<int> left, Vector<int> right) => SaturatingDoublingMultiplyWideningUpper(left, right);
+
+ /// <summary>
+ /// svint64_t svqdmullt_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingDoublingMultiplyWideningUpper(Vector<int> op1, Vector<int> op2, ulong imm_index) => SaturatingDoublingMultiplyWideningUpper(op1, op2, imm_index);
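The widening doubling multiplies saturate in the destination type, and again only MinValue × MinValue can overflow. A scalar model of one element pair of svqdmullb, illustrative only:

internal static class SaturatingDoublingMultiplyWideningModel
{
    public static int Multiply(short a, short b)
    {
        long v = 2L * a * b;                             // doubled product at wide precision
        return v > int.MaxValue ? int.MaxValue : (int)v;
    }
}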
+
+
+ /// SaturatingExtractNarrowingLower : Saturating extract narrow (bottom)
+
+ /// <summary>
+ /// svint8_t svqxtnb[_s16](svint16_t op)
+ /// </summary>
+ public static unsafe Vector<sbyte> SaturatingExtractNarrowingLower(Vector<short> value) => SaturatingExtractNarrowingLower(value);
+
+ /// <summary>
+ /// svint16_t svqxtnb[_s32](svint32_t op)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingExtractNarrowingLower(Vector<int> value) => SaturatingExtractNarrowingLower(value);
+
+ /// <summary>
+ /// svint32_t svqxtnb[_s64](svint64_t op)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingExtractNarrowingLower(Vector<long> value) => SaturatingExtractNarrowingLower(value);
+
+ /// <summary>
+ /// svuint8_t svqxtnb[_u16](svuint16_t op)
+ /// </summary>
+ public static unsafe Vector<byte> SaturatingExtractNarrowingLower(Vector<ushort> value) => SaturatingExtractNarrowingLower(value);
+
+ /// <summary>
+ /// svuint16_t svqxtnb[_u32](svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> SaturatingExtractNarrowingLower(Vector<uint> value) => SaturatingExtractNarrowingLower(value);
+
+ /// <summary>
+ /// svuint32_t svqxtnb[_u64](svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<uint> SaturatingExtractNarrowingLower(Vector<ulong> value) => SaturatingExtractNarrowingLower(value);
+
+
+ /// SaturatingExtractNarrowingUpper : Saturating extract narrow (top)
+
+ /// <summary>
+ /// svint8_t svqxtnt[_s16](svint8_t even, svint16_t op)
+ /// </summary>
+ public static unsafe Vector<sbyte> SaturatingExtractNarrowingUpper(Vector<sbyte> even, Vector<short> op) => SaturatingExtractNarrowingUpper(even, op);
+
+ /// <summary>
+ /// svint16_t svqxtnt[_s32](svint16_t even, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingExtractNarrowingUpper(Vector<short> even, Vector<int> op) => SaturatingExtractNarrowingUpper(even, op);
+
+ /// <summary>
+ /// svint32_t svqxtnt[_s64](svint32_t even, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingExtractNarrowingUpper(Vector<int> even, Vector<long> op) => SaturatingExtractNarrowingUpper(even, op);
+
+ /// <summary>
+ /// svuint8_t svqxtnt[_u16](svuint8_t even, svuint16_t op)
+ /// </summary>
+ public static unsafe Vector<byte> SaturatingExtractNarrowingUpper(Vector<byte> even, Vector<ushort> op) => SaturatingExtractNarrowingUpper(even, op);
+
+ /// <summary>
+ /// svuint16_t svqxtnt[_u32](svuint16_t even, svuint32_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> SaturatingExtractNarrowingUpper(Vector<ushort> even, Vector<uint> op) => SaturatingExtractNarrowingUpper(even, op);
+
+ /// <summary>
+ /// svuint32_t svqxtnt[_u64](svuint32_t even, svuint64_t op)
+ /// </summary>
+ public static unsafe Vector<uint> SaturatingExtractNarrowingUpper(Vector<uint> even, Vector<ulong> op) => SaturatingExtractNarrowingUpper(even, op);
+
+
+ /// SaturatingExtractUnsignedNarrowingLower : Saturating extract unsigned narrow (bottom)
+
+ /// <summary>
+ /// svuint8_t svqxtunb[_s16](svint16_t op)
+ /// </summary>
+ public static unsafe Vector<byte> SaturatingExtractUnsignedNarrowingLower(Vector<short> value) => SaturatingExtractUnsignedNarrowingLower(value);
+
+ /// <summary>
+ /// svuint16_t svqxtunb[_s32](svint32_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> SaturatingExtractUnsignedNarrowingLower(Vector<int> value) => SaturatingExtractUnsignedNarrowingLower(value);
+
+ /// <summary>
+ /// svuint32_t svqxtunb[_s64](svint64_t op)
+ /// </summary>
+ public static unsafe Vector<uint> SaturatingExtractUnsignedNarrowingLower(Vector<long> value) => SaturatingExtractUnsignedNarrowingLower(value);
+
+
+ /// SaturatingExtractUnsignedNarrowingUpper : Saturating extract unsigned narrow (top)
+
+ /// <summary>
+ /// svuint8_t svqxtunt[_s16](svuint8_t even, svint16_t op)
+ /// </summary>
+ public static unsafe Vector<byte> SaturatingExtractUnsignedNarrowingUpper(Vector<byte> even, Vector<short> op) => SaturatingExtractUnsignedNarrowingUpper(even, op);
+
+ /// <summary>
+ /// svuint16_t svqxtunt[_s32](svuint16_t even, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<ushort> SaturatingExtractUnsignedNarrowingUpper(Vector<ushort> even, Vector<int> op) => SaturatingExtractUnsignedNarrowingUpper(even, op);
+
+ /// <summary>
+ /// svuint32_t svqxtunt[_s64](svuint32_t even, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<uint> SaturatingExtractUnsignedNarrowingUpper(Vector<uint> even, Vector<long> op) => SaturatingExtractUnsignedNarrowingUpper(even, op);
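svqxtnb clamps each wide element into the narrow range, while svqxtunb clamps signed input into the unsigned narrow range. Scalar models, illustrative only:

internal static class SaturatingNarrowModel
{
    public static sbyte Narrow(short v)            // svqxtnb
        => v > sbyte.MaxValue ? sbyte.MaxValue
         : v < sbyte.MinValue ? sbyte.MinValue : (sbyte)v;

    public static byte UnsignedNarrow(short v)     // svqxtunb
        => v > byte.MaxValue ? byte.MaxValue
         : v < 0 ? (byte)0 : (byte)v;
}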
+
+
+ /// SaturatingNegate : Saturating negate
+
+ /// <summary>
+ /// svint8_t svqneg[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op)
+ /// svint8_t svqneg[_s8]_x(svbool_t pg, svint8_t op)
+ /// svint8_t svqneg[_s8]_z(svbool_t pg, svint8_t op)
+ /// </summary>
+ public static unsafe Vector<sbyte> SaturatingNegate(Vector<sbyte> value) => SaturatingNegate(value);
+
+ /// <summary>
+ /// svint16_t svqneg[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ /// svint16_t svqneg[_s16]_x(svbool_t pg, svint16_t op)
+ /// svint16_t svqneg[_s16]_z(svbool_t pg, svint16_t op)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingNegate(Vector<short> value) => SaturatingNegate(value);
+
+ /// <summary>
+ /// svint32_t svqneg[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ /// svint32_t svqneg[_s32]_x(svbool_t pg, svint32_t op)
+ /// svint32_t svqneg[_s32]_z(svbool_t pg, svint32_t op)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingNegate(Vector<int> value) => SaturatingNegate(value);
+
+ /// <summary>
+ /// svint64_t svqneg[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op)
+ /// svint64_t svqneg[_s64]_x(svbool_t pg, svint64_t op)
+ /// svint64_t svqneg[_s64]_z(svbool_t pg, svint64_t op)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingNegate(Vector<long> value) => SaturatingNegate(value);
+
+
+ /// SaturatingRoundingDoublingComplexMultiplyAddHighRotate : Saturating rounding doubling complex multiply-add high with rotate
+
+ /// <summary>
+ /// svint8_t svqrdcmlah[_s8](svint8_t op1, svint8_t op2, svint8_t op3, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<sbyte> SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector<sbyte> op1, Vector<sbyte> op2, Vector<sbyte> op3, [ConstantExpected] byte rotation) => SaturatingRoundingDoublingComplexMultiplyAddHighRotate(op1, op2, op3, rotation);
+
+ /// <summary>
+ /// svint16_t svqrdcmlah[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector<short> op1, Vector<short> op2, Vector<short> op3, [ConstantExpected] byte rotation) => SaturatingRoundingDoublingComplexMultiplyAddHighRotate(op1, op2, op3, rotation);
+
+ /// <summary>
+ /// svint16_t svqrdcmlah_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector<short> op1, Vector<short> op2, Vector<short> op3, ulong imm_index, [ConstantExpected] byte rotation) => SaturatingRoundingDoublingComplexMultiplyAddHighRotate(op1, op2, op3, imm_index, rotation);
+
+ /// <summary>
+ /// svint32_t svqrdcmlah[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector<int> op1, Vector<int> op2, Vector<int> op3, [ConstantExpected] byte rotation) => SaturatingRoundingDoublingComplexMultiplyAddHighRotate(op1, op2, op3, rotation);
+
+ /// <summary>
+ /// svint32_t svqrdcmlah_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector<int> op1, Vector<int> op2, Vector<int> op3, ulong imm_index, [ConstantExpected] byte rotation) => SaturatingRoundingDoublingComplexMultiplyAddHighRotate(op1, op2, op3, imm_index, rotation);
+
+ /// <summary>
+ /// svint64_t svqrdcmlah[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_rotation)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector<long> op1, Vector<long> op2, Vector<long> op3, [ConstantExpected] byte rotation) => SaturatingRoundingDoublingComplexMultiplyAddHighRotate(op1, op2, op3, rotation);
+
+
+ /// SaturatingRoundingDoublingMultiplyAddHigh : Saturating rounding doubling multiply-add high
+
+ /// <summary>
+ /// svint8_t svqrdmlah[_s8](svint8_t op1, svint8_t op2, svint8_t op3)
+ /// </summary>
+ public static unsafe Vector<sbyte> SaturatingRoundingDoublingMultiplyAddHigh(Vector<sbyte> op1, Vector<sbyte> op2, Vector<sbyte> op3) => SaturatingRoundingDoublingMultiplyAddHigh(op1, op2, op3);
+
+ /// <summary>
+ /// svint16_t svqrdmlah[_s16](svint16_t op1, svint16_t op2, svint16_t op3)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingRoundingDoublingMultiplyAddHigh(Vector<short> op1, Vector<short> op2, Vector<short> op3) => SaturatingRoundingDoublingMultiplyAddHigh(op1, op2, op3);
+
+ /// <summary>
+ /// svint16_t svqrdmlah_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingRoundingDoublingMultiplyAddHigh(Vector<short> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) => SaturatingRoundingDoublingMultiplyAddHigh(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svint32_t svqrdmlah[_s32](svint32_t op1, svint32_t op2, svint32_t op3)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingRoundingDoublingMultiplyAddHigh(Vector<int> op1, Vector<int> op2, Vector<int> op3) => SaturatingRoundingDoublingMultiplyAddHigh(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svqrdmlah_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingRoundingDoublingMultiplyAddHigh(Vector<int> op1, Vector<int> op2, Vector<int> op3, ulong imm_index) => SaturatingRoundingDoublingMultiplyAddHigh(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svint64_t svqrdmlah[_s64](svint64_t op1, svint64_t op2, svint64_t op3)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingRoundingDoublingMultiplyAddHigh(Vector<long> op1, Vector<long> op2, Vector<long> op3) => SaturatingRoundingDoublingMultiplyAddHigh(op1, op2, op3);
+
+ /// <summary>
+ /// svint64_t svqrdmlah_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingRoundingDoublingMultiplyAddHigh(Vector<long> op1, Vector<long> op2, Vector<long> op3, ulong imm_index) => SaturatingRoundingDoublingMultiplyAddHigh(op1, op2, op3, imm_index);
+
+
+ /// SaturatingRoundingDoublingMultiplyHigh : Saturating rounding doubling multiply high
+
+ /// <summary>
+ /// svint8_t svqrdmulh[_s8](svint8_t op1, svint8_t op2)
+ /// </summary>
+ public static unsafe Vector<sbyte> SaturatingRoundingDoublingMultiplyHigh(Vector<sbyte> left, Vector<sbyte> right) => SaturatingRoundingDoublingMultiplyHigh(left, right);
+
+ /// <summary>
+ /// svint16_t svqrdmulh[_s16](svint16_t op1, svint16_t op2)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingRoundingDoublingMultiplyHigh(Vector<short> left, Vector<short> right) => SaturatingRoundingDoublingMultiplyHigh(left, right);
+
+ /// <summary>
+ /// svint16_t svqrdmulh_lane[_s16](svint16_t op1, svint16_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingRoundingDoublingMultiplyHigh(Vector<short> op1, Vector<short> op2, ulong imm_index) => SaturatingRoundingDoublingMultiplyHigh(op1, op2, imm_index);
+
+ /// <summary>
+ /// svint32_t svqrdmulh[_s32](svint32_t op1, svint32_t op2)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingRoundingDoublingMultiplyHigh(Vector<int> left, Vector<int> right) => SaturatingRoundingDoublingMultiplyHigh(left, right);
+
+ /// <summary>
+ /// svint32_t svqrdmulh_lane[_s32](svint32_t op1, svint32_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingRoundingDoublingMultiplyHigh(Vector<int> op1, Vector<int> op2, ulong imm_index) => SaturatingRoundingDoublingMultiplyHigh(op1, op2, imm_index);
+
+ /// <summary>
+ /// svint64_t svqrdmulh[_s64](svint64_t op1, svint64_t op2)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingRoundingDoublingMultiplyHigh(Vector<long> left, Vector<long> right) => SaturatingRoundingDoublingMultiplyHigh(left, right);
+
+ /// <summary>
+ /// svint64_t svqrdmulh_lane[_s64](svint64_t op1, svint64_t op2, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingRoundingDoublingMultiplyHigh(Vector<long> op1, Vector<long> op2, ulong imm_index) => SaturatingRoundingDoublingMultiplyHigh(op1, op2, imm_index);
+
+
+ /// SaturatingRoundingDoublingMultiplySubtractHigh : Saturating rounding doubling multiply-subtract high
+
+ /// <summary>
+ /// svint8_t svqrdmlsh[_s8](svint8_t op1, svint8_t op2, svint8_t op3)
+ /// </summary>
+ public static unsafe Vector<sbyte> SaturatingRoundingDoublingMultiplySubtractHigh(Vector<sbyte> op1, Vector<sbyte> op2, Vector<sbyte> op3) => SaturatingRoundingDoublingMultiplySubtractHigh(op1, op2, op3);
+
+ /// <summary>
+ /// svint16_t svqrdmlsh[_s16](svint16_t op1, svint16_t op2, svint16_t op3)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingRoundingDoublingMultiplySubtractHigh(Vector<short> op1, Vector<short> op2, Vector<short> op3) => SaturatingRoundingDoublingMultiplySubtractHigh(op1, op2, op3);
+
+ /// <summary>
+ /// svint16_t svqrdmlsh_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<short> SaturatingRoundingDoublingMultiplySubtractHigh(Vector<short> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) => SaturatingRoundingDoublingMultiplySubtractHigh(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svint32_t svqrdmlsh[_s32](svint32_t op1, svint32_t op2, svint32_t op3)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingRoundingDoublingMultiplySubtractHigh(Vector<int> op1, Vector<int> op2, Vector<int> op3) => SaturatingRoundingDoublingMultiplySubtractHigh(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svqrdmlsh_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<int> SaturatingRoundingDoublingMultiplySubtractHigh(Vector<int> op1, Vector<int> op2, Vector<int> op3, ulong imm_index) => SaturatingRoundingDoublingMultiplySubtractHigh(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svint64_t svqrdmlsh[_s64](svint64_t op1, svint64_t op2, svint64_t op3)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingRoundingDoublingMultiplySubtractHigh(Vector<long> op1, Vector<long> op2, Vector<long> op3) => SaturatingRoundingDoublingMultiplySubtractHigh(op1, op2, op3);
+
+ /// <summary>
+ /// svint64_t svqrdmlsh_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index)
+ /// </summary>
+ public static unsafe Vector<long> SaturatingRoundingDoublingMultiplySubtractHigh(Vector<long> op1, Vector<long> op2, Vector<long> op3, ulong imm_index) => SaturatingRoundingDoublingMultiplySubtractHigh(op1, op2, op3, imm_index);
+
+
+ /// Scatter16BitNarrowing : Truncate to 16 bits and store, non-temporal
+
+ /// <summary>
+ /// void svstnt1h_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data)
+ /// </summary>
+ public static unsafe void Scatter16BitNarrowing(Vector<int> mask, Vector<uint> addresses, Vector<int> data) => Scatter16BitNarrowing(mask, addresses, data);
+
+ /// <summary>
+ /// void svstnt1h_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data)
+ /// </summary>
+ public static unsafe void Scatter16BitNarrowing(Vector<long> mask, Vector<ulong> addresses, Vector<long> data) => Scatter16BitNarrowing(mask, addresses, data);
+
+ /// <summary>
+ /// void svstnt1h_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data)
+ /// </summary>
+ public static unsafe void Scatter16BitNarrowing(Vector<uint> mask, Vector<uint> addresses, Vector<uint> data) => Scatter16BitNarrowing(mask, addresses, data);
+
+ /// <summary>
+ /// void svstnt1h_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data)
+ /// </summary>
+ public static unsafe void Scatter16BitNarrowing(Vector<ulong> mask, Vector<ulong> addresses, Vector<ulong> data) => Scatter16BitNarrowing(mask, addresses, data);
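Scatter16BitNarrowing truncates each active element to 16 bits and stores it to that lane's address from the bases vector. A scalar model, illustrative only; it requires an unsafe compilation context and treats each base as a raw address:

internal static class ScatterNarrowingModel
{
    // svstnt1h_scatter[_u32base_s32] model: per active lane, store the
    // low 16 bits of data[i] to the address held in addresses[i].
    public static unsafe void Scatter(bool[] mask, uint[] addresses, int[] data)
    {
        for (int i = 0; i < data.Length; i++)
            if (mask[i])
                *(short*)(nuint)addresses[i] = (short)data[i];
    }
}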
+ + + /// Scatter16BitNarrowing : Truncate to 16 bits and store, non-temporal + + /// + /// void svstnt1h_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter16BitNarrowing(mask, addresses, data); + + /// + /// void svstnt1h_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter16BitNarrowing(mask, addresses, data); + + /// + /// void svstnt1h_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter16BitNarrowing(mask, addresses, data); + + /// + /// void svstnt1h_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter16BitNarrowing(mask, addresses, data); + + + /// Scatter16BitWithByteOffsetsNarrowing : Truncate to 16 bits and store, non-temporal + + /// + /// void svstnt1h_scatter_[u32]offset[_s32](svbool_t pg, int16_t *base, svuint32_t offsets, svint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1h_scatter_[s64]offset[_s64](svbool_t pg, int16_t *base, svint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1h_scatter_[u64]offset[_s64](svbool_t pg, int16_t *base, svuint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1h_scatter_[s64]index[_s64](svbool_t pg, int16_t *base, svint64_t indices, svint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svstnt1h_scatter_[u64]index[_s64](svbool_t pg, int16_t *base, svuint64_t indices, svint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svstnt1h_scatter_[u32]offset[_u32](svbool_t pg, uint16_t *base, svuint32_t offsets, svuint32_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1h_scatter_[s64]offset[_u64](svbool_t pg, uint16_t *base, svint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1h_scatter_[u64]offset[_u64](svbool_t pg, uint16_t *base, svuint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1h_scatter_[s64]index[_u64](svbool_t pg, uint16_t *base, svint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svstnt1h_scatter_[u64]index[_u64](svbool_t pg, uint16_t *base, svuint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data);
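A rough scalar model of these truncating scatters (my sketch, not generated output): each active lane truncates its element and stores the low half at a raw byte offset from the base pointer, while the overloads that take indices scale by the target element size instead.

static unsafe class ScatterNarrowingSketch
{
    // Models the 32-bit-data form of Scatter16BitWithByteOffsetsNarrowing:
    // the low 16 bits of each active element go to (address + offsets[i]) bytes.
    // The "indices" overloads would store to (address + indices[i] * sizeof(short)).
    static void Model(bool[] mask, short* address, uint[] offsets, int[] data)
    {
        for (int i = 0; i < data.Length; i++)
        {
            if (!mask[i]) continue;                                   // inactive lane: no store
            *(short*)((byte*)address + offsets[i]) = (short)data[i];  // truncate, then scatter
        }
    }
}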
+ + + /// Scatter32BitNarrowing : Truncate to 32 bits and store, non-temporal + + /// + /// void svstnt1w_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter32BitNarrowing(mask, addresses, data); + + /// + /// void svstnt1w_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter32BitNarrowing(mask, addresses, data); + + + /// Scatter32BitWithByteOffsetsNarrowing : Truncate to 32 bits and store, non-temporal + + /// + /// void svstnt1w_scatter_[s64]offset[_s64](svbool_t pg, int32_t *base, svint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1w_scatter_[u64]offset[_s64](svbool_t pg, int32_t *base, svuint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1w_scatter_[s64]index[_s64](svbool_t pg, int32_t *base, svint64_t indices, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svstnt1w_scatter_[u64]index[_s64](svbool_t pg, int32_t *base, svuint64_t indices, svint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector indices, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svstnt1w_scatter_[s64]offset[_u64](svbool_t pg, uint32_t *base, svint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1w_scatter_[u64]offset[_u64](svbool_t pg, uint32_t *base, svuint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1w_scatter_[s64]index[_u64](svbool_t pg, uint32_t *base, svint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svstnt1w_scatter_[u64]index[_u64](svbool_t pg, uint32_t *base, svuint64_t indices, svuint64_t data) + /// + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector indices, Vector data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data); + + + /// Scatter8BitNarrowing : Truncate to 8 bits and store, non-temporal + + /// + /// void svstnt1b_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter8BitNarrowing(mask, addresses, data); + + /// + /// void svstnt1b_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter8BitNarrowing(mask, addresses, data); + + /// + /// void svstnt1b_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter8BitNarrowing(mask, addresses, data); + + /// + /// void svstnt1b_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter8BitNarrowing(mask, addresses, data); + + + /// Scatter8BitWithByteOffsetsNarrowing : Truncate to 8 bits and store, non-temporal + + /// + /// void svstnt1b_scatter_[u32]offset[_s32](svbool_t pg, int8_t *base, svuint32_t offsets, svint32_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1b_scatter_[s64]offset[_s64](svbool_t pg, int8_t *base, svint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1b_scatter_[u64]offset[_s64](svbool_t pg, int8_t *base, svuint64_t offsets, svint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1b_scatter_[u32]offset[_u32](svbool_t pg, uint8_t *base, svuint32_t offsets, svuint32_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1b_scatter_[s64]offset[_u64](svbool_t pg, uint8_t *base, svint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1b_scatter_[u64]offset[_u64](svbool_t pg, uint8_t *base, svuint64_t offsets, svuint64_t data) + /// + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
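The overloads above that take a vector of addresses model the ACLE "base vector" forms: each active lane carries its own complete destination address, with no shared pointer. A sketch of my own for the 64-bit-base, 8-bit-narrowing case:

static unsafe class VectorBaseScatterSketch
{
    // Models the u64-base form of Scatter8BitNarrowing: addresses[i] is a full
    // byte address, and the low 8 bits of data[i] are stored there when active.
    static void Model(bool[] mask, ulong[] addresses, long[] data)
    {
        for (int i = 0; i < data.Length; i++)
            if (mask[i])
                *(sbyte*)addresses[i] = (sbyte)data[i];
    }
}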
+ + + /// ScatterNonTemporal : Non-truncating store, non-temporal + + /// + /// void svstnt1_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data) => ScatterNonTemporal(mask, addresses, data); + + /// + /// void svstnt1_scatter_[u32]offset[_s32](svbool_t pg, int32_t *base, svuint32_t offsets, svint32_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, int* address, Vector offsets, Vector data) => ScatterNonTemporal(mask, address, offsets, data); + + /// + /// void svstnt1_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data) => ScatterNonTemporal(mask, addresses, data); + + /// + /// void svstnt1_scatter_[s64]offset[_s64](svbool_t pg, int64_t *base, svint64_t offsets, svint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, long* address, Vector offsets, Vector data) => ScatterNonTemporal(mask, address, offsets, data); + + /// + /// void svstnt1_scatter_[u64]offset[_s64](svbool_t pg, int64_t *base, svuint64_t offsets, svint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, long* address, Vector offsets, Vector data) => ScatterNonTemporal(mask, address, offsets, data); + + /// + /// void svstnt1_scatter_[s64]index[_s64](svbool_t pg, int64_t *base, svint64_t indices, svint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, long* address, Vector indices, Vector data) => ScatterNonTemporal(mask, address, indices, data); + + /// + /// void svstnt1_scatter_[u64]index[_s64](svbool_t pg, int64_t *base, svuint64_t indices, svint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, long* address, Vector indices, Vector data) => ScatterNonTemporal(mask, address, indices, data); + + /// + /// void svstnt1_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data) => ScatterNonTemporal(mask, addresses, data); + + /// + /// void svstnt1_scatter_[u32]offset[_u32](svbool_t pg, uint32_t *base, svuint32_t offsets, svuint32_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, uint* address, Vector offsets, Vector data) => ScatterNonTemporal(mask, address, offsets, data); + + /// + /// void svstnt1_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data) => ScatterNonTemporal(mask, addresses, data); + + /// + /// void svstnt1_scatter_[s64]offset[_u64](svbool_t pg, uint64_t *base, svint64_t offsets, svuint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, ulong* address, Vector offsets, Vector data) => ScatterNonTemporal(mask, address, offsets, data); + + /// + /// void svstnt1_scatter_[u64]offset[_u64](svbool_t pg, uint64_t *base, svuint64_t offsets, svuint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, ulong* address, Vector offsets, Vector data) => ScatterNonTemporal(mask, address, offsets, data); + + /// + /// void svstnt1_scatter_[s64]index[_u64](svbool_t pg, uint64_t *base, svint64_t indices, svuint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, ulong* address, Vector indices, Vector data) => ScatterNonTemporal(mask, address, indices, data); + + /// + /// void svstnt1_scatter_[u64]index[_u64](svbool_t pg, uint64_t *base, svuint64_t indices, svuint64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, ulong* address, Vector indices, Vector data) => ScatterNonTemporal(mask, address, indices, data); + + /// + /// void svstnt1_scatter[_u32base_f32](svbool_t pg, svuint32_t bases, svfloat32_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data) => ScatterNonTemporal(mask, addresses, data); + + /// + /// void svstnt1_scatter_[u32]offset[_f32](svbool_t pg, float32_t *base, svuint32_t offsets, svfloat32_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, float* address, Vector offsets, Vector data) => ScatterNonTemporal(mask, address, offsets, data); + + /// + /// void svstnt1_scatter_[s64]offset[_f64](svbool_t pg, float64_t *base, svint64_t offsets, svfloat64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, double* address, Vector offsets, Vector data) => ScatterNonTemporal(mask, address, offsets, data); + + /// + /// void svstnt1_scatter_[s64]index[_f64](svbool_t pg, float64_t *base, svint64_t indices, svfloat64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, double* address, Vector indices, Vector data) => ScatterNonTemporal(mask, address, indices, data); + + /// + /// void svstnt1_scatter[_u64base_f64](svbool_t pg, svuint64_t bases, svfloat64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, Vector addresses, Vector data) => ScatterNonTemporal(mask, addresses, data); + + /// + /// void svstnt1_scatter_[u64]offset[_f64](svbool_t pg, float64_t *base, svuint64_t offsets, svfloat64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, double* address, Vector offsets, Vector data) => ScatterNonTemporal(mask, address, offsets, data); + + /// + /// void svstnt1_scatter_[u64]index[_f64](svbool_t pg, float64_t *base, svuint64_t indices, svfloat64_t data) + /// + public static unsafe void ScatterNonTemporal(Vector mask, double* address, Vector indices, Vector data) => ScatterNonTemporal(mask, address, indices, data);
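ScatterNonTemporal stores elements at full width; the non-temporal part is purely a cache-allocation hint (the data is not expected to be re-read soon), so an architectural model is just a masked scatter. A minimal sketch, with the hint necessarily ignored:

static unsafe class ScatterNonTemporalSketch
{
    // 64-bit "index" form: each active lane stores data[i] to address[indices[i]],
    // i.e. indices are scaled by sizeof(long); the "offset" forms add raw bytes instead.
    static void Model(bool[] mask, long* address, long[] indices, long[] data)
    {
        for (int i = 0; i < data.Length; i++)
            if (mask[i])
                address[indices[i]] = data[i];   // full-width store, no narrowing
    }
}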
+ + + /// ShiftArithmeticRounded : Rounding shift left + + /// + /// svint8_t svrshl[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svrshl[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svrshl[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector ShiftArithmeticRounded(Vector value, Vector count) => ShiftArithmeticRounded(value, count); + + /// + /// svint16_t svrshl[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svrshl[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svrshl[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector ShiftArithmeticRounded(Vector value, Vector count) => ShiftArithmeticRounded(value, count); + + /// + /// svint32_t svrshl[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svrshl[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svrshl[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector ShiftArithmeticRounded(Vector value, Vector count) => ShiftArithmeticRounded(value, count); + + /// + /// svint64_t svrshl[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svrshl[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svrshl[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector ShiftArithmeticRounded(Vector value, Vector count) => ShiftArithmeticRounded(value, count);
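The "Rounding shift left" description reads oddly until you notice that svrshl takes a signed per-lane count: non-negative counts shift left, negative counts become a rounding shift right. A one-lane sketch of my own, ignoring the _m/_x/_z predication variants and assuming the count is in range for the element size:

static class ShiftArithmeticRoundedSketch
{
    // One 32-bit lane of ShiftArithmeticRounded (svrshl).
    static int Model(int value, sbyte count)
    {
        if (count >= 0)
            return value << count;                           // plain left shift
        int n = -count;                                      // negative count: shift right...
        return (int)(((long)value + (1L << (n - 1))) >> n);  // ...with round-to-nearest
    }
}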
+ + + /// ShiftArithmeticRoundedSaturate : Saturating rounding shift left + + /// + /// svint8_t svqrshl[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqrshl[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqrshl[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count) => ShiftArithmeticRoundedSaturate(value, count); + + /// + /// svint16_t svqrshl[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqrshl[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqrshl[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count) => ShiftArithmeticRoundedSaturate(value, count); + + /// + /// svint32_t svqrshl[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) +
/// svint32_t svqrshl[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqrshl[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count) => ShiftArithmeticRoundedSaturate(value, count); + + /// + /// svint64_t svqrshl[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqrshl[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqrshl[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count) => ShiftArithmeticRoundedSaturate(value, count); + + + /// ShiftArithmeticSaturate : Saturating shift left + + /// + /// svint8_t svqshl[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqshl[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqshl[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector ShiftArithmeticSaturate(Vector value, Vector count) => ShiftArithmeticSaturate(value, count); + + /// + /// svint16_t svqshl[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqshl[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqshl[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector ShiftArithmeticSaturate(Vector value, Vector count) => ShiftArithmeticSaturate(value, count); + + /// + /// svint32_t svqshl[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqshl[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqshl[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector ShiftArithmeticSaturate(Vector value, Vector count) => ShiftArithmeticSaturate(value, count); + + /// + /// svint64_t svqshl[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqshl[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqshl[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector ShiftArithmeticSaturate(Vector value, Vector count) => ShiftArithmeticSaturate(value, count); + + + /// ShiftLeftAndInsert : Shift left and insert + + /// + /// svint8_t svsli[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift); + + /// + /// svint16_t svsli[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift); + + /// + /// svint32_t svsli[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift); + + /// + /// svint64_t svsli[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift); + + /// + /// svuint8_t svsli[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift); + + /// + /// svuint16_t svsli[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, 
Vector right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift); + + /// + /// svuint32_t svsli[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift); + + /// + /// svuint64_t svsli[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift); + + + /// ShiftLeftLogicalSaturate : Saturating shift left + + /// + /// svuint8_t svqshl[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svqshl[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svqshl[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) + /// + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count) => ShiftLeftLogicalSaturate(value, count); + + /// + /// svuint16_t svqshl[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svqshl[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svqshl[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) + /// + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count) => ShiftLeftLogicalSaturate(value, count); + + /// + /// svuint32_t svqshl[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svqshl[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svqshl[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) + /// + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count) => ShiftLeftLogicalSaturate(value, count); + + /// + /// svuint64_t svqshl[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svqshl[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svqshl[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) + /// + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count) => ShiftLeftLogicalSaturate(value, count); + + + /// ShiftLeftLogicalSaturateUnsigned : Saturating shift left unsigned + + /// + /// svuint8_t svqshlu[_n_s8]_m(svbool_t pg, svint8_t op1, uint64_t imm2) + /// svuint8_t svqshlu[_n_s8]_x(svbool_t pg, svint8_t op1, uint64_t imm2) + /// svuint8_t svqshlu[_n_s8]_z(svbool_t pg, svint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); + + /// + /// svuint16_t svqshlu[_n_s16]_m(svbool_t pg, svint16_t op1, uint64_t imm2) + /// svuint16_t svqshlu[_n_s16]_x(svbool_t pg, svint16_t op1, uint64_t imm2) + /// svuint16_t svqshlu[_n_s16]_z(svbool_t pg, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); + + /// + /// svuint32_t svqshlu[_n_s32]_m(svbool_t pg, svint32_t op1, uint64_t imm2) + /// svuint32_t svqshlu[_n_s32]_x(svbool_t pg, svint32_t op1, uint64_t imm2) + /// svuint32_t svqshlu[_n_s32]_z(svbool_t pg, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); + + /// + /// svuint64_t svqshlu[_n_s64]_m(svbool_t pg, svint64_t op1, uint64_t imm2) + /// svuint64_t svqshlu[_n_s64]_x(svbool_t pg, svint64_t op1, uint64_t imm2) + /// 
svuint64_t svqshlu[_n_s64]_z(svbool_t pg, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); + + + /// ShiftLeftLogicalWideningEven : Shift left long (bottom) + + /// + /// svint16_t svshllb[_n_s16](svint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningEven(value, count); + + /// + /// svint32_t svshllb[_n_s32](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningEven(value, count); + + /// + /// svint64_t svshllb[_n_s64](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningEven(value, count); + + /// + /// svuint16_t svshllb[_n_u16](svuint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningEven(value, count); + + /// + /// svuint32_t svshllb[_n_u32](svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningEven(value, count); + + /// + /// svuint64_t svshllb[_n_u64](svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningEven(value, count); + + + /// ShiftLeftLogicalWideningOdd : Shift left long (top) + + /// + /// svint16_t svshllt[_n_s16](svint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningOdd(value, count); + + /// + /// svint32_t svshllt[_n_s32](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningOdd(value, count); + + /// + /// svint64_t svshllt[_n_s64](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningOdd(value, count); + + /// + /// svuint16_t svshllt[_n_u16](svuint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningOdd(value, count); + + /// + /// svuint32_t svshllt[_n_u32](svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningOdd(value, count); + + /// + /// svuint64_t svshllt[_n_u64](svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningOdd(value, count); + + + /// ShiftLogicalRounded : Rounding shift left + + /// + /// svuint8_t svrshl[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svrshl[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svrshl[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) + /// + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count) => ShiftLogicalRounded(value, count); + + /// + /// svuint16_t svrshl[_u16]_m(svbool_t pg, svuint16_t op1, 
svint16_t op2) + /// svuint16_t svrshl[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svrshl[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) + /// + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count) => ShiftLogicalRounded(value, count); + + /// + /// svuint32_t svrshl[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svrshl[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svrshl[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) + /// + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count) => ShiftLogicalRounded(value, count); + + /// + /// svuint64_t svrshl[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svrshl[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svrshl[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) + /// + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count) => ShiftLogicalRounded(value, count); + + + /// ShiftLogicalRoundedSaturate : Saturating rounding shift left + + /// + /// svuint8_t svqrshl[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svqrshl[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) + /// svuint8_t svqrshl[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) + /// + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count) => ShiftLogicalRoundedSaturate(value, count); + + /// + /// svuint16_t svqrshl[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svqrshl[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) + /// svuint16_t svqrshl[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) + /// + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count) => ShiftLogicalRoundedSaturate(value, count); + + /// + /// svuint32_t svqrshl[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svqrshl[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) + /// svuint32_t svqrshl[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) + /// + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count) => ShiftLogicalRoundedSaturate(value, count); + + /// + /// svuint64_t svqrshl[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svqrshl[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) + /// svuint64_t svqrshl[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) + /// + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count) => ShiftLogicalRoundedSaturate(value, count); + + + /// ShiftRightAndInsert : Shift right and insert + + /// + /// svint8_t svsri[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + /// + /// svint16_t svsri[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + /// + /// svint32_t svsri[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + /// + /// svint64_t svsri[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + /// + /// svuint8_t svsri[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + /// + /// svuint16_t svsri[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + /// + /// svuint32_t svsri[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + /// + /// svuint64_t svsri[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift);
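ShiftRightAndInsert (sri) is a bitwise splice rather than a plain shift: the shifted bits of 'right' fill the low bit positions while 'left' keeps its top 'shift' bits, which is handy for packing bit-fields. ShiftRightArithmeticAdd (ssra, next group) simply accumulates a shifted value. One-lane sketches of my own, assuming 1 <= shift/count < 32 so that C#'s shift-count masking does not get in the way:

static class ShiftInsertSketch
{
    // One 32-bit lane of ShiftRightAndInsert (svsri).
    static uint ShiftRightAndInsertModel(uint left, uint right, byte shift)
    {
        uint mask = uint.MaxValue >> shift;      // bit positions taken from 'right'
        return (left & ~mask) | (right >> shift);
    }

    // One 32-bit lane of ShiftRightArithmeticAdd (svsra): shift, then accumulate.
    static int ShiftRightArithmeticAddModel(int addend, int value, byte count)
        => addend + (value >> count);
}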
+ + + /// ShiftRightArithmeticAdd : Shift right and accumulate + + /// + /// svint8_t svsra[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticAdd(addend, value, count); + + /// + /// svint16_t svsra[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticAdd(addend, value, count); + + /// + /// svint32_t svsra[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticAdd(addend, value, count); + + /// + /// svint64_t svsra[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticAdd(addend, value, count); + + + /// ShiftRightArithmeticNarrowingSaturateEven : Saturating shift right narrow (bottom) + + /// + /// svint8_t svqshrnb[_n_s16](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateEven(value, count); + + /// + /// svint16_t svqshrnb[_n_s32](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateEven(value, count); + + /// + /// svint32_t svqshrnb[_n_s64](svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateEven(value, count); + + /// + /// svuint8_t svqshrnb[_n_u16](svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateEven(value, count); + + /// + /// svuint16_t svqshrnb[_n_u32](svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateEven(value, count); + + /// + /// svuint32_t svqshrnb[_n_u64](svuint64_t op1, uint64_t imm2) + /// + public static unsafe
Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateEven(value, count); + + + /// ShiftRightArithmeticNarrowingSaturateOdd : Saturating shift right narrow (top) + + /// + /// svint8_t svqshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateOdd(even, value, count); + + /// + /// svint16_t svqshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateOdd(even, value, count); + + /// + /// svint32_t svqshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateOdd(even, value, count); + + /// + /// svuint8_t svqshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateOdd(even, value, count); + + /// + /// svuint16_t svqshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateOdd(even, value, count); + + /// + /// svuint32_t svqshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateOdd(even, value, count); + + + /// ShiftRightArithmeticNarrowingSaturateUnsignedEven : Saturating shift right unsigned narrow (bottom) + + /// + /// svuint8_t svqshrunb[_n_s16](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedEven(value, count); + + /// + /// svuint16_t svqshrunb[_n_s32](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedEven(value, count); + + /// + /// svuint32_t svqshrunb[_n_s64](svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedEven(value, count); + + + /// ShiftRightArithmeticNarrowingSaturateUnsignedOdd : Saturating shift right unsigned narrow (top) + + /// + /// svuint8_t svqshrunt[_n_s16](svuint8_t even, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedOdd(even, value, count); + + /// + /// svuint16_t svqshrunt[_n_s32](svuint16_t even, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count) => 
ShiftRightArithmeticNarrowingSaturateUnsignedOdd(even, value, count); + + /// + /// svuint32_t svqshrunt[_n_s64](svuint32_t even, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedOdd(even, value, count); + + + /// ShiftRightArithmeticRounded : Rounding shift right + + /// + /// svint8_t svrshr[_n_s8]_m(svbool_t pg, svint8_t op1, uint64_t imm2) + /// svint8_t svrshr[_n_s8]_x(svbool_t pg, svint8_t op1, uint64_t imm2) + /// svint8_t svrshr[_n_s8]_z(svbool_t pg, svint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRounded(value, count); + + /// + /// svint16_t svrshr[_n_s16]_m(svbool_t pg, svint16_t op1, uint64_t imm2) + /// svint16_t svrshr[_n_s16]_x(svbool_t pg, svint16_t op1, uint64_t imm2) + /// svint16_t svrshr[_n_s16]_z(svbool_t pg, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRounded(value, count); + + /// + /// svint32_t svrshr[_n_s32]_m(svbool_t pg, svint32_t op1, uint64_t imm2) + /// svint32_t svrshr[_n_s32]_x(svbool_t pg, svint32_t op1, uint64_t imm2) + /// svint32_t svrshr[_n_s32]_z(svbool_t pg, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRounded(value, count); + + /// + /// svint64_t svrshr[_n_s64]_m(svbool_t pg, svint64_t op1, uint64_t imm2) + /// svint64_t svrshr[_n_s64]_x(svbool_t pg, svint64_t op1, uint64_t imm2) + /// svint64_t svrshr[_n_s64]_z(svbool_t pg, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRounded(value, count); + + + /// ShiftRightArithmeticRoundedAdd : Rounding shift right and accumulate + + /// + /// svint8_t svrsra[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count); + + /// + /// svint16_t svrsra[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count); + + /// + /// svint32_t svrsra[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count); + + /// + /// svint64_t svrsra[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count); + + + /// ShiftRightArithmeticRoundedNarrowingSaturateEven : Saturating rounding shift right narrow (bottom) + + /// + /// svint8_t svqrshrnb[_n_s16](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateEven(value, count); + + /// + /// svint16_t 
svqrshrnb[_n_s32](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateEven(value, count); + + /// + /// svint32_t svqrshrnb[_n_s64](svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateEven(value, count); + + + /// ShiftRightArithmeticRoundedNarrowingSaturateOdd : Saturating rounding shift right narrow (top) + + /// + /// svint8_t svqrshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateOdd(even, value, count); + + /// + /// svint16_t svqrshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateOdd(even, value, count); + + /// + /// svint32_t svqrshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateOdd(even, value, count); + + + /// ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven : Saturating rounding shift right unsigned narrow (bottom) + + /// + /// svuint8_t svqrshrunb[_n_s16](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(value, count); + + /// + /// svuint16_t svqrshrunb[_n_s32](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(value, count); + + /// + /// svuint32_t svqrshrunb[_n_s64](svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(value, count); + + + /// ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd : Saturating rounding shift right unsigned narrow (top) + + /// + /// svuint8_t svqrshrunt[_n_s16](svuint8_t even, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(even, value, count); + + /// + /// svuint16_t svqrshrunt[_n_s32](svuint16_t even, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(even, value, count); + + /// + /// svuint32_t svqrshrunt[_n_s64](svuint32_t even, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count) => 
ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(even, value, count); + + + /// ShiftRightLogicalAdd : Shift right and accumulate + + /// + /// svuint8_t svsra[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalAdd(addend, value, count); + + /// + /// svuint16_t svsra[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalAdd(addend, value, count); + + /// + /// svuint32_t svsra[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalAdd(addend, value, count); + + /// + /// svuint64_t svsra[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalAdd(addend, value, count); + + + /// ShiftRightLogicalNarrowingEven : Shift right narrow (bottom) + + /// + /// svint8_t svshrnb[_n_s16](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingEven(value, count); + + /// + /// svint16_t svshrnb[_n_s32](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingEven(value, count); + + /// + /// svint32_t svshrnb[_n_s64](svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingEven(value, count); + + /// + /// svuint8_t svshrnb[_n_u16](svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingEven(value, count); + + /// + /// svuint16_t svshrnb[_n_u32](svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingEven(value, count); + + /// + /// svuint32_t svshrnb[_n_u64](svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingEven(value, count); + + + /// ShiftRightLogicalNarrowingOdd : Shift right narrow (top) + + /// + /// svint8_t svshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingOdd(even, value, count); + + /// + /// svint16_t svshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingOdd(even, value, count); + + /// + /// svint32_t svshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingOdd(even, value, count); + + /// + /// svuint8_t svshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2) + /// + public 
static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingOdd(even, value, count); + + /// + /// svuint16_t svshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingOdd(even, value, count); + + /// + /// svuint32_t svshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingOdd(even, value, count); + + + /// ShiftRightLogicalRounded : Rounding shift right + + /// + /// svuint8_t svrshr[_n_u8]_m(svbool_t pg, svuint8_t op1, uint64_t imm2) + /// svuint8_t svrshr[_n_u8]_x(svbool_t pg, svuint8_t op1, uint64_t imm2) + /// svuint8_t svrshr[_n_u8]_z(svbool_t pg, svuint8_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRounded(value, count); + + /// + /// svuint16_t svrshr[_n_u16]_m(svbool_t pg, svuint16_t op1, uint64_t imm2) + /// svuint16_t svrshr[_n_u16]_x(svbool_t pg, svuint16_t op1, uint64_t imm2) + /// svuint16_t svrshr[_n_u16]_z(svbool_t pg, svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRounded(value, count); + + /// + /// svuint32_t svrshr[_n_u32]_m(svbool_t pg, svuint32_t op1, uint64_t imm2) + /// svuint32_t svrshr[_n_u32]_x(svbool_t pg, svuint32_t op1, uint64_t imm2) + /// svuint32_t svrshr[_n_u32]_z(svbool_t pg, svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRounded(value, count); + + /// + /// svuint64_t svrshr[_n_u64]_m(svbool_t pg, svuint64_t op1, uint64_t imm2) + /// svuint64_t svrshr[_n_u64]_x(svbool_t pg, svuint64_t op1, uint64_t imm2) + /// svuint64_t svrshr[_n_u64]_z(svbool_t pg, svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRounded(value, count); + + + /// ShiftRightLogicalRoundedAdd : Rounding shift right and accumulate + + /// + /// svuint8_t svrsra[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); + + /// + /// svuint16_t svrsra[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); + + /// + /// svuint32_t svrsra[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); + + /// + /// svuint64_t svrsra[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) + /// + public static unsafe Vector ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); + + + /// ShiftRightLogicalRoundedNarrowingEven : Rounding shift right narrow (bottom) + + /// + /// svint8_t 
svrshrnb[_n_s16](svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingEven(value, count); + + /// + /// svint16_t svrshrnb[_n_s32](svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingEven(value, count); + + /// + /// svint32_t svrshrnb[_n_s64](svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingEven(value, count); + + /// + /// svuint8_t svrshrnb[_n_u16](svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingEven(value, count); + + /// + /// svuint16_t svrshrnb[_n_u32](svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingEven(value, count); + + /// + /// svuint32_t svrshrnb[_n_u64](svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingEven(value, count); + + + /// ShiftRightLogicalRoundedNarrowingOdd : Rounding shift right narrow (top) + + /// + /// svint8_t svrshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingOdd(even, value, count); + + /// + /// svint16_t svrshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingOdd(even, value, count); + + /// + /// svint32_t svrshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingOdd(even, value, count); + + /// + /// svuint8_t svrshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingOdd(even, value, count); + + /// + /// svuint16_t svrshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingOdd(even, value, count); + + /// + /// svuint32_t svrshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingOdd(even, value, count); + + + /// ShiftRightLogicalRoundedNarrowingSaturateEven : Saturating rounding shift right narrow (bottom) + + /// + /// svuint8_t svqrshrnb[_n_u16](svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) => 
ShiftRightLogicalRoundedNarrowingSaturateEven(value, count); + + /// + /// svuint16_t svqrshrnb[_n_u32](svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingSaturateEven(value, count); + + /// + /// svuint32_t svqrshrnb[_n_u64](svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingSaturateEven(value, count); + + + /// ShiftRightLogicalRoundedNarrowingSaturateOdd : Saturating rounding shift right narrow (top) + + /// + /// svuint8_t svqrshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingSaturateOdd(even, value, count); + + /// + /// svuint16_t svqrshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingSaturateOdd(even, value, count); + + /// + /// svuint32_t svqrshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2) + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingSaturateOdd(even, value, count); + + + /// SubtractHighNarrowingLower : Subtract narrow high part (bottom) + + /// + /// svint8_t svsubhnb[_s16](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) => SubtractHighNarrowingLower(left, right); + + /// + /// svint16_t svsubhnb[_s32](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) => SubtractHighNarrowingLower(left, right); + + /// + /// svint32_t svsubhnb[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) => SubtractHighNarrowingLower(left, right); + + /// + /// svuint8_t svsubhnb[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) => SubtractHighNarrowingLower(left, right); + + /// + /// svuint16_t svsubhnb[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) => SubtractHighNarrowingLower(left, right); + + /// + /// svuint32_t svsubhnb[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) => SubtractHighNarrowingLower(left, right);
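Semantically, these high-narrowing subtracts keep the most significant half of the full-width difference. In the Lower (bottom) form the narrowed results land in the even lanes of the destination with the odd lanes zeroed; the Upper (top) form that follows fills the odd lanes of an existing 'even' vector instead, so the pair together produce a fully interleaved narrowed result. A single-element sketch of my own for the 16-to-8-bit case:

static class SubtractHighNarrowingSketch
{
    // High half of (left - right), narrowed from 16 to 8 bits (svsubhnb/svsubhnt).
    static sbyte Element(short left, short right)
        => (sbyte)((left - right) >> 8);
}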
SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) => SubtractHighNarrowingUpper(even, left, right); + + /// + /// svuint8_t svsubhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) => SubtractHighNarrowingUpper(even, left, right); + + /// + /// svuint16_t svsubhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) => SubtractHighNarrowingUpper(even, left, right); + + /// + /// svuint32_t svsubhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) => SubtractHighNarrowingUpper(even, left, right); + + + /// SubtractSaturate : Saturating subtract + + /// + /// svint8_t svqsub[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqsub[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqsub[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svint16_t svqsub[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqsub[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqsub[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svint32_t svqsub[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqsub[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqsub[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svint64_t svqsub[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqsub[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqsub[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svuint8_t svqsub[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svqsub[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svqsub[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svuint16_t svqsub[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svqsub[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svqsub[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svuint32_t svqsub[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svqsub[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svqsub[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svuint64_t svqsub[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svqsub[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svqsub[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static 
unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + + /// SubtractSaturateReversed : Saturating subtract reversed + + /// + /// svint8_t svqsubr[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqsubr[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// svint8_t svqsubr[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right); + + /// + /// svint16_t svqsubr[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqsubr[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// svint16_t svqsubr[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right); + + /// + /// svint32_t svqsubr[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqsubr[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// svint32_t svqsubr[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right); + + /// + /// svint64_t svqsubr[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqsubr[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// svint64_t svqsubr[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right); + + /// + /// svuint8_t svqsubr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svqsubr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// svuint8_t svqsubr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right); + + /// + /// svuint16_t svqsubr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svqsubr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// svuint16_t svqsubr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right); + + /// + /// svuint32_t svqsubr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svqsubr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// svuint32_t svqsubr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right); + + /// + /// svuint64_t svqsubr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svqsubr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// svuint64_t svqsubr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right); + + + /// SubtractWideLower : Subtract wide (bottom) + + /// + /// svint16_t svsubwb[_s16](svint16_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) => SubtractWideLower(left, right); + + /// + /// svint32_t svsubwb[_s32](svint32_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) => SubtractWideLower(left, right); + + /// + /// svint64_t svsubwb[_s64](svint64_t op1, 
svint32_t op2) + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) => SubtractWideLower(left, right); + + /// + /// svuint16_t svsubwb[_u16](svuint16_t op1, svuint8_t op2) + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) => SubtractWideLower(left, right); + + /// + /// svuint32_t svsubwb[_u32](svuint32_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) => SubtractWideLower(left, right); + + /// + /// svuint64_t svsubwb[_u64](svuint64_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) => SubtractWideLower(left, right); + + + /// SubtractWideUpper : Subtract wide (top) + + /// + /// svint16_t svsubwt[_s16](svint16_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) => SubtractWideUpper(left, right); + + /// + /// svint32_t svsubwt[_s32](svint32_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) => SubtractWideUpper(left, right); + + /// + /// svint64_t svsubwt[_s64](svint64_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) => SubtractWideUpper(left, right); + + /// + /// svuint16_t svsubwt[_u16](svuint16_t op1, svuint8_t op2) + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) => SubtractWideUpper(left, right); + + /// + /// svuint32_t svsubwt[_u32](svuint32_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) => SubtractWideUpper(left, right); + + /// + /// svuint64_t svsubwt[_u64](svuint64_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) => SubtractWideUpper(left, right); + + + /// SubtractWideningLower : Subtract long (bottom) + + /// + /// svint16_t svsublb[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) => SubtractWideningLower(left, right); + + /// + /// svint32_t svsublb[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) => SubtractWideningLower(left, right); + + /// + /// svint64_t svsublb[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) => SubtractWideningLower(left, right); + + /// + /// svuint16_t svsublb[_u16](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) => SubtractWideningLower(left, right); + + /// + /// svuint32_t svsublb[_u32](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) => SubtractWideningLower(left, right); + + /// + /// svuint64_t svsublb[_u64](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) => SubtractWideningLower(left, right); + + + /// SubtractWideningLowerUpper : Subtract long (bottom - top) + + /// + /// svint16_t svsublbt[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractWideningLowerUpper(Vector left, Vector right) => SubtractWideningLowerUpper(left, right); + + /// + /// svint32_t svsublbt[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractWideningLowerUpper(Vector left, Vector right) => SubtractWideningLowerUpper(left, right); + + /// + /// 
svint64_t svsublbt[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractWideningLowerUpper(Vector left, Vector right) => SubtractWideningLowerUpper(left, right); + + + /// SubtractWideningUpper : Subtract long (top) + + /// + /// svint16_t svsublt[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) => SubtractWideningUpper(left, right); + + /// + /// svint32_t svsublt[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) => SubtractWideningUpper(left, right); + + /// + /// svint64_t svsublt[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) => SubtractWideningUpper(left, right); + + /// + /// svuint16_t svsublt[_u16](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) => SubtractWideningUpper(left, right); + + /// + /// svuint32_t svsublt[_u32](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) => SubtractWideningUpper(left, right); + + /// + /// svuint64_t svsublt[_u64](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) => SubtractWideningUpper(left, right); + + + /// SubtractWideningUpperLower : Subtract long (top - bottom) + + /// + /// svint16_t svsubltb[_s16](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector SubtractWideningUpperLower(Vector left, Vector right) => SubtractWideningUpperLower(left, right); + + /// + /// svint32_t svsubltb[_s32](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector SubtractWideningUpperLower(Vector left, Vector right) => SubtractWideningUpperLower(left, right); + + /// + /// svint64_t svsubltb[_s64](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector SubtractWideningUpperLower(Vector left, Vector right) => SubtractWideningUpperLower(left, right); + + + /// SubtractWithBorrowWideningLower : Subtract with borrow long (bottom) + + /// + /// svuint32_t svsbclb[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector SubtractWithBorrowWideningLower(Vector op1, Vector op2, Vector op3) => SubtractWithBorrowWideningLower(op1, op2, op3); + + /// + /// svuint64_t svsbclb[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector SubtractWithBorrowWideningLower(Vector op1, Vector op2, Vector op3) => SubtractWithBorrowWideningLower(op1, op2, op3); + + + /// SubtractWithBorrowWideningUpper : Subtract with borrow long (top) + + /// + /// svuint32_t svsbclt[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector SubtractWithBorrowWideningUpper(Vector op1, Vector op2, Vector op3) => SubtractWithBorrowWideningUpper(op1, op2, op3); + + /// + /// svuint64_t svsbclt[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector SubtractWithBorrowWideningUpper(Vector op1, Vector op2, Vector op3) => SubtractWithBorrowWideningUpper(op1, op2, op3); + + + /// UpConvertWideningUpper : Up convert long (top) + + /// + /// svfloat64_t svcvtlt_f64[_f32]_m(svfloat64_t inactive, svbool_t pg, svfloat32_t op) + /// svfloat64_t svcvtlt_f64[_f32]_x(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector UpConvertWideningUpper(Vector value) => UpConvertWideningUpper(value); + + + /// VectorTableLookup 
: Table lookup in two-vector table + + /// + /// svint8_t svtbl2[_s8](svint8x2_t data, svuint8_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) table, Vector indices) => VectorTableLookup(table, indices); + + /// + /// svint16_t svtbl2[_s16](svint16x2_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) table, Vector indices) => VectorTableLookup(table, indices); + + /// + /// svint32_t svtbl2[_s32](svint32x2_t data, svuint32_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) table, Vector indices) => VectorTableLookup(table, indices); + + /// + /// svint64_t svtbl2[_s64](svint64x2_t data, svuint64_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) table, Vector indices) => VectorTableLookup(table, indices); + + /// + /// svuint8_t svtbl2[_u8](svuint8x2_t data, svuint8_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) table, Vector indices) => VectorTableLookup(table, indices); + + /// + /// svuint16_t svtbl2[_u16](svuint16x2_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) table, Vector indices) => VectorTableLookup(table, indices); + + /// + /// svuint32_t svtbl2[_u32](svuint32x2_t data, svuint32_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) table, Vector indices) => VectorTableLookup(table, indices); + + /// + /// svuint64_t svtbl2[_u64](svuint64x2_t data, svuint64_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) table, Vector indices) => VectorTableLookup(table, indices); + + /// + /// svfloat32_t svtbl2[_f32](svfloat32x2_t data, svuint32_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) table, Vector indices) => VectorTableLookup(table, indices); + + /// + /// svfloat64_t svtbl2[_f64](svfloat64x2_t data, svuint64_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) table, Vector indices) => VectorTableLookup(table, indices); + + + /// VectorTableLookupExtension : Table lookup in single-vector table (merging) + + /// + /// svint8_t svtbx[_s8](svint8_t fallback, svint8_t data, svuint8_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svint16_t svtbx[_s16](svint16_t fallback, svint16_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svint32_t svtbx[_s32](svint32_t fallback, svint32_t data, svuint32_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svint64_t svtbx[_s64](svint64_t fallback, svint64_t data, svuint64_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svuint8_t svtbx[_u8](svuint8_t fallback, svuint8_t data, svuint8_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// 
+ /// svuint16_t svtbx[_u16](svuint16_t fallback, svuint16_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svuint32_t svtbx[_u32](svuint32_t fallback, svuint32_t data, svuint32_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svuint64_t svtbx[_u64](svuint64_t fallback, svuint64_t data, svuint64_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svfloat32_t svtbx[_f32](svfloat32_t fallback, svfloat32_t data, svuint32_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svfloat64_t svtbx[_f64](svfloat64_t fallback, svfloat64_t data, svuint64_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + + /// Xor : Bitwise exclusive OR of three vectors + + /// + /// svint8_t sveor3[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + /// + /// svint16_t sveor3[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + /// + /// svint32_t sveor3[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + /// + /// svint64_t sveor3[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + /// + /// svuint8_t sveor3[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + /// + /// svuint16_t sveor3[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + /// + /// svuint32_t sveor3[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + /// + /// svuint64_t sveor3[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + + /// XorRotateRight : Bitwise exclusive OR and rotate right + + /// + /// svint8_t svxar[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + /// + /// svint16_t svxar[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + /// + /// svint32_t svxar[_n_s32](svint32_t op1, svint32_t op2, uint64_t 
imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + /// + /// svint64_t svxar[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + /// + /// svuint8_t svxar[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + /// + /// svuint16_t svxar[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + /// + /// svuint32_t svxar[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + /// + /// svuint64_t svxar[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + } +} + diff --git a/sve_api/out_cs_api/SveAes.PlatformNotSupported.cs b/sve_api/out_cs_api/SveAes.PlatformNotSupported.cs new file mode 100644 index 0000000000000..10b97449ab090 --- /dev/null +++ b/sve_api/out_cs_api/SveAes.PlatformNotSupported.cs @@ -0,0 +1,79 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveAes : AdvSimd + { + internal SveAes() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// AesInverseMixColumns : AES inverse mix columns + + /// + /// svuint8_t svaesimc[_u8](svuint8_t op) + /// + public static unsafe Vector AesInverseMixColumns(Vector value) { throw new PlatformNotSupportedException(); } + + + /// AesMixColumns : AES mix columns + + /// + /// svuint8_t svaesmc[_u8](svuint8_t op) + /// + public static unsafe Vector AesMixColumns(Vector value) { throw new PlatformNotSupportedException(); } + + + /// AesSingleRoundDecryption : AES single round decryption + + /// + /// svuint8_t svaesd[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector AesSingleRoundDecryption(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// AesSingleRoundEncryption : AES single round encryption + + /// + /// svuint8_t svaese[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector AesSingleRoundEncryption(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// PolynomialMultiplyWideningLower : Polynomial multiply long (bottom) + + /// + /// svuint64_t svpmullb_pair[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, Vector right) { 
throw new PlatformNotSupportedException(); } + + + /// PolynomialMultiplyWideningUpper : Polynomial multiply long (top) + + /// + /// svuint64_t svpmullt_pair[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + } +} + diff --git a/sve_api/out_cs_api/SveAes.System.Runtime.Intrinsics.cs b/sve_api/out_cs_api/SveAes.System.Runtime.Intrinsics.cs new file mode 100644 index 0000000000000..e6e7bdc3413cd --- /dev/null +++ b/sve_api/out_cs_api/SveAes.System.Runtime.Intrinsics.cs @@ -0,0 +1,7 @@ + public static System.Numerics.Vector AesInverseMixColumns(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector AesMixColumns(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector AesSingleRoundDecryption(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AesSingleRoundEncryption(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector PolynomialMultiplyWideningLower(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector PolynomialMultiplyWideningUpper(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + diff --git a/sve_api/out_cs_api/SveAes.cs b/sve_api/out_cs_api/SveAes.cs new file mode 100644 index 0000000000000..84043a27fd627 --- /dev/null +++ b/sve_api/out_cs_api/SveAes.cs @@ -0,0 +1,79 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveAes : AdvSimd + { + internal SveAes() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// AesInverseMixColumns : AES inverse mix columns + + /// + /// svuint8_t svaesimc[_u8](svuint8_t op) + /// + public static unsafe Vector AesInverseMixColumns(Vector value) => AesInverseMixColumns(value); + + + /// AesMixColumns : AES mix columns + + /// + /// svuint8_t svaesmc[_u8](svuint8_t op) + /// + public static unsafe Vector AesMixColumns(Vector value) => AesMixColumns(value); + + + /// AesSingleRoundDecryption : AES single round decryption + + /// + /// svuint8_t svaesd[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector AesSingleRoundDecryption(Vector left, Vector right) => AesSingleRoundDecryption(left, right); + + + /// AesSingleRoundEncryption : AES single round encryption + + /// + /// svuint8_t svaese[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector AesSingleRoundEncryption(Vector left, Vector right) => AesSingleRoundEncryption(left, right); + + + /// PolynomialMultiplyWideningLower : Polynomial multiply long (bottom) + + /// + /// svuint64_t svpmullb_pair[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, Vector right) => PolynomialMultiplyWideningLower(left, 
right); + + + /// PolynomialMultiplyWideningUpper : Polynomial multiply long (top) + + /// + /// svuint64_t svpmullt_pair[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, Vector right) => PolynomialMultiplyWideningUpper(left, right); + + } +} + diff --git a/sve_api/out_cs_api/SveBf16.PlatformNotSupported.cs b/sve_api/out_cs_api/SveBf16.PlatformNotSupported.cs new file mode 100644 index 0000000000000..6bd5e96e75190 --- /dev/null +++ b/sve_api/out_cs_api/SveBf16.PlatformNotSupported.cs @@ -0,0 +1,496 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveBf16 : AdvSimd + { + internal SveBf16() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// Bfloat16DotProduct : BFloat16 dot product + + /// + /// svfloat32_t svbfdot[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) + /// + public static unsafe Vector Bfloat16DotProduct(Vector addend, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// Bfloat16MatrixMultiplyAccumulate : BFloat16 matrix multiply-accumulate + + /// + /// svfloat32_t svbfmmla[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) + /// + public static unsafe Vector Bfloat16MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + + /// Bfloat16MultiplyAddWideningToSinglePrecisionLower : BFloat16 multiply-add long to single-precision (bottom) + + /// + /// svfloat32_t svbfmlalb[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) + /// + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionLower(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svbfmlalb_lane[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3, uint64_t imm_index) + /// + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionLower(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// Bfloat16MultiplyAddWideningToSinglePrecisionUpper : BFloat16 multiply-add long to single-precision (top) + + /// + /// svfloat32_t svbfmlalt[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) + /// + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionUpper(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svbfmlalt_lane[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3, uint64_t imm_index) + /// + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// ConcatenateEvenInt128FromTwoInputs : Concatenate even quadwords from two inputs + + /// + /// svbfloat16_t svuzp1q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector 
ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// ConcatenateOddInt128FromTwoInputs : Concatenate odd quadwords from two inputs + + /// + /// svbfloat16_t svuzp2q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// ConditionalExtractAfterLastActiveElement : Conditionally extract element after last + + /// + /// svbfloat16_t svclasta[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svclasta[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// bfloat16_t svclasta[_n_bf16](svbool_t pg, bfloat16_t fallback, svbfloat16_t data) + /// + public static unsafe bfloat16 ConditionalExtractAfterLastActiveElement(Vector mask, bfloat16 defaultValues, Vector data) { throw new PlatformNotSupportedException(); } + + + /// ConditionalExtractAfterLastActiveElementAndReplicate : Conditionally extract element after last + + /// + /// svbfloat16_t svclasta[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, Vector defaultScalar, Vector data) { throw new PlatformNotSupportedException(); } + + + /// ConditionalExtractLastActiveElement : Conditionally extract last element + + /// + /// svbfloat16_t svclastb[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// + public static unsafe Vector ConditionalExtractLastActiveElement(Vector mask, Vector defaultValue, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svclastb[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// bfloat16_t svclastb[_n_bf16](svbool_t pg, bfloat16_t fallback, svbfloat16_t data) + /// + public static unsafe bfloat16 ConditionalExtractLastActiveElement(Vector mask, bfloat16 defaultValues, Vector data) { throw new PlatformNotSupportedException(); } + + + /// ConditionalExtractLastActiveElementAndReplicate : Conditionally extract last element + + /// + /// svbfloat16_t svclastb[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// + public static unsafe Vector ConditionalExtractLastActiveElementAndReplicate(Vector mask, Vector fallback, Vector data) { throw new PlatformNotSupportedException(); } + + + /// ConditionalSelect : Conditionally select elements + + /// + /// svbfloat16_t svsel[_bf16](svbool_t pg, svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// ConvertToBFloat16 : Floating-point convert + + /// + /// svbfloat16_t svcvt_bf16[_f32]_m(svbfloat16_t inactive, svbool_t pg, svfloat32_t op) + /// svbfloat16_t svcvt_bf16[_f32]_x(svbool_t pg, svfloat32_t op) + /// svbfloat16_t svcvt_bf16[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector ConvertToBFloat16(Vector value) { throw new PlatformNotSupportedException(); } + + + + /// CreateFalseMaskBFloat16 : Set all predicate elements to false + + /// + /// svbool_t svpfalse[_b]() + /// + public static unsafe Vector CreateFalseMaskBFloat16() { throw new PlatformNotSupportedException(); } + + + /// CreateTrueMaskBFloat16 : Set 
predicate elements to true + + /// + /// svbool_t svptrue_pat_b8(enum svpattern pattern) + /// + public static unsafe Vector CreateTrueMaskBFloat16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); } + + + /// CreateWhileReadAfterWriteMask : While free of read-after-write conflicts + + /// + /// svbool_t svwhilerw[_bf16](const bfloat16_t *op1, const bfloat16_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(bfloat16* left, bfloat16* right) { throw new PlatformNotSupportedException(); } + + + /// CreateWhileWriteAfterReadMask : While free of write-after-read conflicts + + /// + /// svbool_t svwhilewr[_bf16](const bfloat16_t *op1, const bfloat16_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(bfloat16* left, bfloat16* right) { throw new PlatformNotSupportedException(); } + + + /// DotProductBySelectedScalar : BFloat16 dot product + + /// + /// svfloat32_t svbfdot_lane[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3, uint64_t imm_index) + /// + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); } + + + /// DownConvertNarrowingUpper : Down convert and narrow (top) + + /// + /// svbfloat16_t svcvtnt_bf16[_f32]_m(svbfloat16_t even, svbool_t pg, svfloat32_t op) + /// svbfloat16_t svcvtnt_bf16[_f32]_x(svbfloat16_t even, svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector DownConvertNarrowingUpper(Vector value) { throw new PlatformNotSupportedException(); } + + + /// DuplicateSelectedScalarToVector : Broadcast a scalar value + + /// + /// svbfloat16_t svdup_lane[_bf16](svbfloat16_t data, uint16_t index) + /// svbfloat16_t svdupq_lane[_bf16](svbfloat16_t data, uint64_t index) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); } + + + /// ExtractAfterLastScalar : Extract element after last + + /// + /// bfloat16_t svlasta[_bf16](svbool_t pg, svbfloat16_t op) + /// + public static unsafe bfloat16 ExtractAfterLastScalar(Vector value) { throw new PlatformNotSupportedException(); } + + + /// ExtractAfterLastVector : Extract element after last + + /// + /// bfloat16_t svlasta[_bf16](svbool_t pg, svbfloat16_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + + /// ExtractLastScalar : Extract last element + + /// + /// bfloat16_t svlastb[_bf16](svbool_t pg, svbfloat16_t op) + /// + public static unsafe bfloat16 ExtractLastScalar(Vector value) { throw new PlatformNotSupportedException(); } + + + /// ExtractLastVector : Extract last element + + /// + /// bfloat16_t svlastb[_bf16](svbool_t pg, svbfloat16_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) { throw new PlatformNotSupportedException(); } + + + /// ExtractVector : Extract vector from pair of vectors + + /// + /// svbfloat16_t svext[_bf16](svbfloat16_t op1, svbfloat16_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); } + + + /// GetActiveElementCount : Count set predicate bits + + /// + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) + /// + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) { throw new PlatformNotSupportedException(); } + + + 
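As a usage sketch for the mask helpers above (a hypothetical caller; assumes the proposed bfloat16 element type, SVE-capable hardware, and the generic Vector<T> spellings that this generated listing elides):

    if (SveBf16.IsSupported)
    {
        // Build an all-true bfloat16 predicate, then count its active lanes to
        // discover how many bfloat16 elements the runtime vector length holds.
        Vector<bfloat16> all = SveBf16.CreateTrueMaskBFloat16(SveMaskPattern.All);
        ulong laneCount = SveBf16.GetActiveElementCount(all, all);
    }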
/// InsertIntoShiftedVector : Insert scalar into shifted vector + + /// + /// svbfloat16_t svinsr[_n_bf16](svbfloat16_t op1, bfloat16_t op2) + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, bfloat16 right) { throw new PlatformNotSupportedException(); } + + + /// InterleaveEvenInt128FromTwoInputs : Interleave even quadwords from two inputs + + /// + /// svbfloat16_t svtrn1q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// InterleaveInt128FromHighHalvesOfTwoInputs : Interleave quadwords from high halves of two inputs + + /// + /// svbfloat16_t svzip2q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// InterleaveInt128FromLowHalvesOfTwoInputs : Interleave quadwords from low halves of two inputs + + /// + /// svbfloat16_t svzip1q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// InterleaveOddInt128FromTwoInputs : Interleave odd quadwords from two inputs + + /// + /// svbfloat16_t svtrn2q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// LoadVector : Unextended load + + /// + /// svbfloat16_t svld1[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe Vector LoadVector(Vector mask, bfloat16* address) { throw new PlatformNotSupportedException(); } + + + /// LoadVector128AndReplicateToVector : Load and replicate 128 bits of data + + /// + /// svbfloat16_t svld1rq[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, bfloat16* address) { throw new PlatformNotSupportedException(); } + + + /// LoadVector256AndReplicateToVector : Load and replicate 256 bits of data + + /// + /// svbfloat16_t svld1ro[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, bfloat16* address) { throw new PlatformNotSupportedException(); } + + + /// LoadVectorFirstFaulting : Unextended load, first-faulting + + /// + /// svbfloat16_t svldff1[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, bfloat16* address) { throw new PlatformNotSupportedException(); } + + + /// LoadVectorNonFaulting : Unextended load, non-faulting + + /// + /// svbfloat16_t svldnf1[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe Vector LoadVectorNonFaulting(bfloat16* address) { throw new PlatformNotSupportedException(); } + + + /// LoadVectorNonTemporal : Unextended load, non-temporal + + /// + /// svbfloat16_t svldnt1[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe Vector LoadVectorNonTemporal(Vector mask, bfloat16* address) { throw new PlatformNotSupportedException(); } + + + /// LoadVectorx2 : Load two-element tuples into two vectors + + /// + /// svbfloat16x2_t svld2[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, bfloat16* address) { throw new PlatformNotSupportedException(); } + + + /// 
LoadVectorx3 : Load three-element tuples into three vectors + + /// + /// svbfloat16x3_t svld3[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, bfloat16* address) { throw new PlatformNotSupportedException(); } + + + /// LoadVectorx4 : Load four-element tuples into four vectors + + /// + /// svbfloat16x4_t svld4[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, bfloat16* address) { throw new PlatformNotSupportedException(); } + + + /// PopCount : Count nonzero bits + + /// + /// svuint16_t svcnt[_bf16]_m(svuint16_t inactive, svbool_t pg, svbfloat16_t op) + /// svuint16_t svcnt[_bf16]_x(svbool_t pg, svbfloat16_t op) + /// svuint16_t svcnt[_bf16]_z(svbool_t pg, svbfloat16_t op) + /// + public static unsafe Vector PopCount(Vector value) { throw new PlatformNotSupportedException(); } + + + /// ReverseElement : Reverse all elements + + /// + /// svbfloat16_t svrev[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReverseElement(Vector value) { throw new PlatformNotSupportedException(); } + + + /// Splice : Splice two vectors under predicate control + + /// + /// svbfloat16_t svsplice[_bf16](svbool_t pg, svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// Store : Non-truncating store + + /// + /// void svst1[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16_t data) + /// + public static unsafe void Store(Vector mask, bfloat16* address, Vector data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst2[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16x2_t data) + /// + public static unsafe void Store(Vector mask, bfloat16* address, (Vector Value1, Vector Value2) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst3[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16x3_t data) + /// + public static unsafe void Store(Vector mask, bfloat16* address, (Vector Value1, Vector Value2, Vector Value3) data) { throw new PlatformNotSupportedException(); } + + /// + /// void svst4[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16x4_t data) + /// + public static unsafe void Store(Vector mask, bfloat16* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) { throw new PlatformNotSupportedException(); } + + + /// StoreNonTemporal : Non-truncating store, non-temporal + + /// + /// void svstnt1[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, bfloat16* address, Vector data) { throw new PlatformNotSupportedException(); } + + + /// TransposeEven : Interleave even elements from two inputs + + /// + /// svbfloat16_t svtrn1[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector TransposeEven(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// TransposeOdd : Interleave odd elements from two inputs + + /// + /// svbfloat16_t svtrn2[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector TransposeOdd(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// UnzipEven : Concatenate even elements from two inputs + + /// + /// svbfloat16_t svuzp1[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + 
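A minimal sketch pairing the predicated load and store surface above (CopyOneVector, src, and dst are illustrative placeholders, not part of the generated API; same Vector<bfloat16> assumption as the previous sketch):

    static unsafe void CopyOneVector(bfloat16* src, bfloat16* dst)
    {
        // Load one vector of bfloat16 data under an all-true predicate, then
        // store it back out; Store leaves lanes inactive in the mask untouched.
        Vector<bfloat16> mask = SveBf16.CreateTrueMaskBFloat16(SveMaskPattern.All);
        Vector<bfloat16> data = SveBf16.LoadVector(mask, src);
        SveBf16.Store(mask, dst, data);
    }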
+ /// UnzipOdd : Concatenate odd elements from two inputs + + /// + /// svbfloat16_t svuzp2[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// VectorTableLookup : Table lookup in single-vector table + + /// + /// svbfloat16_t svtbl[_bf16](svbfloat16_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svtbl2[_bf16](svbfloat16x2_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) table, Vector indices) { throw new PlatformNotSupportedException(); } + + + /// VectorTableLookupExtension : Table lookup in single-vector table (merging) + + /// + /// svbfloat16_t svtbx[_bf16](svbfloat16_t fallback, svbfloat16_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) { throw new PlatformNotSupportedException(); } + + + /// ZipHigh : Interleave elements from high halves of two inputs + + /// + /// svbfloat16_t svzip2[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector ZipHigh(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// ZipLow : Interleave elements from low halves of two inputs + + /// + /// svbfloat16_t svzip1[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector ZipLow(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + } +} + diff --git a/sve_api/out_cs_api/SveBf16.System.Runtime.Intrinsics.cs b/sve_api/out_cs_api/SveBf16.System.Runtime.Intrinsics.cs new file mode 100644 index 0000000000000..7129e9d76102a --- /dev/null +++ b/sve_api/out_cs_api/SveBf16.System.Runtime.Intrinsics.cs @@ -0,0 +1,61 @@ + public static System.Numerics.Vector Bfloat16DotProduct(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Bfloat16MatrixMultiplyAccumulate(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector Bfloat16MultiplyAddWideningToSinglePrecisionLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector Bfloat16MultiplyAddWideningToSinglePrecisionLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector Bfloat16MultiplyAddWideningToSinglePrecisionUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector Bfloat16MultiplyAddWideningToSinglePrecisionUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector ConcatenateEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConditionalExtractAfterLastActiveElement(System.Numerics.Vector mask, System.Numerics.Vector defaultValue, System.Numerics.Vector data) { throw null; } + public static 
bfloat16 ConditionalExtractAfterLastActiveElement(System.Numerics.Vector mask, bfloat16 defaultValues, System.Numerics.Vector data) { throw null; } + public static System.Numerics.Vector ConditionalExtractAfterLastActiveElementAndReplicate(System.Numerics.Vector mask, System.Numerics.Vector defaultScalar, System.Numerics.Vector data) { throw null; } + public static System.Numerics.Vector ConditionalExtractLastActiveElement(System.Numerics.Vector mask, System.Numerics.Vector defaultValue, System.Numerics.Vector data) { throw null; } + public static bfloat16 ConditionalExtractLastActiveElement(System.Numerics.Vector mask, bfloat16 defaultValues, System.Numerics.Vector data) { throw null; } + public static System.Numerics.Vector ConditionalExtractLastActiveElementAndReplicate(System.Numerics.Vector mask, System.Numerics.Vector fallback, System.Numerics.Vector data) { throw null; } + public static System.Numerics.Vector ConditionalSelect(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConvertToBFloat16(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector CreateFalseMaskBFloat16() { throw null; } + public static System.Numerics.Vector CreateTrueMaskBFloat16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileReadAfterWriteMask(bfloat16* left, bfloat16* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileWriteAfterReadMask(bfloat16* left, bfloat16* right) { throw null; } + public static System.Numerics.Vector DotProductBySelectedScalar(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector DownConvertNarrowingUpper(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(System.Numerics.Vector data, [ConstantExpected] byte index) { throw null; } + public static bfloat16 ExtractAfterLastScalar(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ExtractAfterLastVector(System.Numerics.Vector value) { throw null; } + public static bfloat16 ExtractLastScalar(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ExtractLastVector(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ExtractVector(System.Numerics.Vector upper, System.Numerics.Vector lower, [ConstantExpected] byte index) { throw null; } + public static ulong GetActiveElementCount(System.Numerics.Vector mask, System.Numerics.Vector from) { throw null; } + public static System.Numerics.Vector InsertIntoShiftedVector(System.Numerics.Vector left, bfloat16 right) { throw null; } + public static System.Numerics.Vector InterleaveEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromHighHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromLowHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static unsafe System.Numerics.Vector 
LoadVector(System.Numerics.Vector mask, bfloat16* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector128AndReplicateToVector(System.Numerics.Vector mask, bfloat16* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector256AndReplicateToVector(System.Numerics.Vector mask, bfloat16* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorFirstFaulting(System.Numerics.Vector mask, bfloat16* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonFaulting(bfloat16* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonTemporal(System.Numerics.Vector mask, bfloat16* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector) LoadVectorx2(System.Numerics.Vector mask, bfloat16* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx3(System.Numerics.Vector mask, bfloat16* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx4(System.Numerics.Vector mask, bfloat16* address) { throw null; } + public static System.Numerics.Vector PopCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReverseElement(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Splice(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, bfloat16* address, System.Numerics.Vector data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, bfloat16* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, bfloat16* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, bfloat16* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3, System.Numerics.Vector Value4) data) { throw null; } + public static unsafe void StoreNonTemporal(System.Numerics.Vector mask, bfloat16* address, System.Numerics.Vector data) { throw null; } + public static System.Numerics.Vector TransposeEven(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector TransposeOdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector UnzipEven(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector UnzipOdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector VectorTableLookup(System.Numerics.Vector data, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookup((System.Numerics.Vector data1, System.Numerics.Vector data2) table, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookupExtension(System.Numerics.Vector fallback, System.Numerics.Vector data, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector ZipHigh(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } 
+ public static System.Numerics.Vector ZipLow(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + diff --git a/sve_api/out_cs_api/SveBf16.cs b/sve_api/out_cs_api/SveBf16.cs new file mode 100644 index 0000000000000..82a8008f881aa --- /dev/null +++ b/sve_api/out_cs_api/SveBf16.cs @@ -0,0 +1,496 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveBf16 : AdvSimd + { + internal SveBf16() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// Bfloat16DotProduct : BFloat16 dot product + + /// + /// svfloat32_t svbfdot[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) + /// + public static unsafe Vector Bfloat16DotProduct(Vector addend, Vector left, Vector right) => Bfloat16DotProduct(addend, left, right); + + + /// Bfloat16MatrixMultiplyAccumulate : BFloat16 matrix multiply-accumulate + + /// + /// svfloat32_t svbfmmla[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) + /// + public static unsafe Vector Bfloat16MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3) => Bfloat16MatrixMultiplyAccumulate(op1, op2, op3); + + + /// Bfloat16MultiplyAddWideningToSinglePrecisionLower : BFloat16 multiply-add long to single-precision (bottom) + + /// + /// svfloat32_t svbfmlalb[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) + /// + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionLower(Vector op1, Vector op2, Vector op3) => Bfloat16MultiplyAddWideningToSinglePrecisionLower(op1, op2, op3); + + /// + /// svfloat32_t svbfmlalb_lane[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3, uint64_t imm_index) + /// + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => Bfloat16MultiplyAddWideningToSinglePrecisionLower(op1, op2, op3, imm_index); + + + /// Bfloat16MultiplyAddWideningToSinglePrecisionUpper : BFloat16 multiply-add long to single-precision (top) + + /// + /// svfloat32_t svbfmlalt[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) + /// + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionUpper(Vector op1, Vector op2, Vector op3) => Bfloat16MultiplyAddWideningToSinglePrecisionUpper(op1, op2, op3); + + /// + /// svfloat32_t svbfmlalt_lane[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3, uint64_t imm_index) + /// + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => Bfloat16MultiplyAddWideningToSinglePrecisionUpper(op1, op2, op3, imm_index); + + + /// ConcatenateEvenInt128FromTwoInputs : Concatenate even quadwords from two inputs + + /// + /// svbfloat16_t svuzp1q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + + /// ConcatenateOddInt128FromTwoInputs : Concatenate odd 
quadwords from two inputs + + /// + /// svbfloat16_t svuzp2q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) => ConcatenateOddInt128FromTwoInputs(left, right); + + + /// ConditionalExtractAfterLastActiveElement : Conditionally extract element after last + + /// + /// svbfloat16_t svclasta[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data); + + /// + /// svbfloat16_t svclasta[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// bfloat16_t svclasta[_n_bf16](svbool_t pg, bfloat16_t fallback, svbfloat16_t data) + /// + public static unsafe bfloat16 ConditionalExtractAfterLastActiveElement(Vector mask, bfloat16 defaultValues, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data); + + + /// ConditionalExtractAfterLastActiveElementAndReplicate : Conditionally extract element after last + + /// + /// svbfloat16_t svclasta[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, Vector defaultScalar, Vector data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data); + + + /// ConditionalExtractLastActiveElement : Conditionally extract last element + + /// + /// svbfloat16_t svclastb[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// + public static unsafe Vector ConditionalExtractLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractLastActiveElement(mask, defaultValue, data); + + /// + /// svbfloat16_t svclastb[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// bfloat16_t svclastb[_n_bf16](svbool_t pg, bfloat16_t fallback, svbfloat16_t data) + /// + public static unsafe bfloat16 ConditionalExtractLastActiveElement(Vector mask, bfloat16 defaultValues, Vector data) => ConditionalExtractLastActiveElement(mask, defaultValues, data); + + + /// ConditionalExtractLastActiveElementAndReplicate : Conditionally extract last element + + /// + /// svbfloat16_t svclastb[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// + public static unsafe Vector ConditionalExtractLastActiveElementAndReplicate(Vector mask, Vector fallback, Vector data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data); + + + /// ConditionalSelect : Conditionally select elements + + /// + /// svbfloat16_t svsel[_bf16](svbool_t pg, svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right) => ConditionalSelect(mask, left, right); + + + /// ConvertToBFloat16 : Floating-point convert + + /// + /// svbfloat16_t svcvt_bf16[_f32]_m(svbfloat16_t inactive, svbool_t pg, svfloat32_t op) + /// svbfloat16_t svcvt_bf16[_f32]_x(svbool_t pg, svfloat32_t op) + /// svbfloat16_t svcvt_bf16[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector ConvertToBFloat16(Vector value) => ConvertToBFloat16(value); + + + + /// CreateFalseMaskBFloat16 : Set all predicate elements to false + + /// + /// svbool_t svpfalse[_b]() + /// + public static unsafe Vector CreateFalseMaskBFloat16() => CreateFalseMaskBFloat16(); + + + /// CreateTrueMaskBFloat16 : Set predicate elements to true + + /// + /// svbool_t 
svptrue_pat_b8(enum svpattern pattern) + /// + public static unsafe Vector CreateTrueMaskBFloat16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskBFloat16(pattern); + + + /// CreateWhileReadAfterWriteMask : While free of read-after-write conflicts + + /// + /// svbool_t svwhilerw[_bf16](const bfloat16_t *op1, const bfloat16_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(bfloat16* left, bfloat16* right) => CreateWhileReadAfterWriteMask(left, right); + + + /// CreateWhileWriteAfterReadMask : While free of write-after-read conflicts + + /// + /// svbool_t svwhilewr[_bf16](const bfloat16_t *op1, const bfloat16_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(bfloat16* left, bfloat16* right) => CreateWhileWriteAfterReadMask(left, right); + + + /// DotProductBySelectedScalar : BFloat16 dot product + + /// + /// svfloat32_t svbfdot_lane[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3, uint64_t imm_index) + /// + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => DotProductBySelectedScalar(addend, left, right, rightIndex); + + + /// DownConvertNarrowingUpper : Down convert and narrow (top) + + /// + /// svbfloat16_t svcvtnt_bf16[_f32]_m(svbfloat16_t even, svbool_t pg, svfloat32_t op) + /// svbfloat16_t svcvtnt_bf16[_f32]_x(svbfloat16_t even, svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector DownConvertNarrowingUpper(Vector value) => DownConvertNarrowingUpper(value); + + + /// DuplicateSelectedScalarToVector : Broadcast a scalar value + + /// + /// svbfloat16_t svdup_lane[_bf16](svbfloat16_t data, uint16_t index) + /// svbfloat16_t svdupq_lane[_bf16](svbfloat16_t data, uint64_t index) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + + /// ExtractAfterLastScalar : Extract element after last + + /// + /// bfloat16_t svlasta[_bf16](svbool_t pg, svbfloat16_t op) + /// + public static unsafe bfloat16 ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + + /// ExtractAfterLastVector : Extract element after last + + /// + /// bfloat16_t svlasta[_bf16](svbool_t pg, svbfloat16_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + + /// ExtractLastScalar : Extract last element + + /// + /// bfloat16_t svlastb[_bf16](svbool_t pg, svbfloat16_t op) + /// + public static unsafe bfloat16 ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + + /// ExtractLastVector : Extract last element + + /// + /// bfloat16_t svlastb[_bf16](svbool_t pg, svbfloat16_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + + /// ExtractVector : Extract vector from pair of vectors + + /// + /// svbfloat16_t svext[_bf16](svbfloat16_t op1, svbfloat16_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index); + + + /// GetActiveElementCount : Count set predicate bits + + /// + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) + /// + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) => GetActiveElementCount(mask, from); + + + /// InsertIntoShiftedVector : Insert scalar into shifted vector + + /// + /// svbfloat16_t svinsr[_n_bf16](svbfloat16_t op1, 
bfloat16_t op2) + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, bfloat16 right) => InsertIntoShiftedVector(left, right); + + + /// InterleaveEvenInt128FromTwoInputs : Interleave even quadwords from two inputs + + /// + /// svbfloat16_t svtrn1q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) => InterleaveEvenInt128FromTwoInputs(left, right); + + + /// InterleaveInt128FromHighHalvesOfTwoInputs : Interleave quadwords from high halves of two inputs + + /// + /// svbfloat16_t svzip2q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right); + + + /// InterleaveInt128FromLowHalvesOfTwoInputs : Interleave quadwords from low halves of two inputs + + /// + /// svbfloat16_t svzip1q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right); + + + /// InterleaveOddInt128FromTwoInputs : Interleave odd quadwords from two inputs + + /// + /// svbfloat16_t svtrn2q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) => InterleaveOddInt128FromTwoInputs(left, right); + + + /// LoadVector : Unextended load + + /// + /// svbfloat16_t svld1[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe Vector LoadVector(Vector mask, bfloat16* address) => LoadVector(mask, address); + + + /// LoadVector128AndReplicateToVector : Load and replicate 128 bits of data + + /// + /// svbfloat16_t svld1rq[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, bfloat16* address) => LoadVector128AndReplicateToVector(mask, address); + + + /// LoadVector256AndReplicateToVector : Load and replicate 256 bits of data + + /// + /// svbfloat16_t svld1ro[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, bfloat16* address) => LoadVector256AndReplicateToVector(mask, address); + + + /// LoadVectorFirstFaulting : Unextended load, first-faulting + + /// + /// svbfloat16_t svldff1[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, bfloat16* address) => LoadVectorFirstFaulting(mask, address); + + + /// LoadVectorNonFaulting : Unextended load, non-faulting + + /// + /// svbfloat16_t svldnf1[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe Vector LoadVectorNonFaulting(bfloat16* address) => LoadVectorNonFaulting(address); + + + /// LoadVectorNonTemporal : Unextended load, non-temporal + + /// + /// svbfloat16_t svldnt1[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe Vector LoadVectorNonTemporal(Vector mask, bfloat16* address) => LoadVectorNonTemporal(mask, address); + + + /// LoadVectorx2 : Load two-element tuples into two vectors + + /// + /// svbfloat16x2_t svld2[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, bfloat16* address) => LoadVectorx2(mask, address); + + + /// LoadVectorx3 : Load three-element tuples into three vectors + + /// + /// svbfloat16x3_t svld3[_bf16](svbool_t pg, const bfloat16_t *base) + /// + 
public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, bfloat16* address) => LoadVectorx3(mask, address); + + + /// LoadVectorx4 : Load four-element tuples into four vectors + + /// + /// svbfloat16x4_t svld4[_bf16](svbool_t pg, const bfloat16_t *base) + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, bfloat16* address) => LoadVectorx4(mask, address); + + + /// PopCount : Count nonzero bits + + /// + /// svuint16_t svcnt[_bf16]_m(svuint16_t inactive, svbool_t pg, svbfloat16_t op) + /// svuint16_t svcnt[_bf16]_x(svbool_t pg, svbfloat16_t op) + /// svuint16_t svcnt[_bf16]_z(svbool_t pg, svbfloat16_t op) + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + + /// ReverseElement : Reverse all elements + + /// + /// svbfloat16_t svrev[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + + /// Splice : Splice two vectors under predicate control + + /// + /// svbfloat16_t svsplice[_bf16](svbool_t pg, svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + + /// Store : Non-truncating store + + /// + /// void svst1[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16_t data) + /// + public static unsafe void Store(Vector mask, bfloat16* address, Vector data) => Store(mask, address, data); + + /// + /// void svst2[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16x2_t data) + /// + public static unsafe void Store(Vector mask, bfloat16* address, (Vector Value1, Vector Value2) data) => Store(mask, address, data); + + /// + /// void svst3[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16x3_t data) + /// + public static unsafe void Store(Vector mask, bfloat16* address, (Vector Value1, Vector Value2, Vector Value3) data) => Store(mask, address, data); + + /// + /// void svst4[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16x4_t data) + /// + public static unsafe void Store(Vector mask, bfloat16* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) => Store(mask, address, data); + + + /// StoreNonTemporal : Non-truncating store, non-temporal + + /// + /// void svstnt1[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16_t data) + /// + public static unsafe void StoreNonTemporal(Vector mask, bfloat16* address, Vector data) => StoreNonTemporal(mask, address, data); + + + /// TransposeEven : Interleave even elements from two inputs + + /// + /// svbfloat16_t svtrn1[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector TransposeEven(Vector left, Vector right) => TransposeEven(left, right); + + + /// TransposeOdd : Interleave odd elements from two inputs + + /// + /// svbfloat16_t svtrn2[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector TransposeOdd(Vector left, Vector right) => TransposeOdd(left, right); + + + /// UnzipEven : Concatenate even elements from two inputs + + /// + /// svbfloat16_t svuzp1[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) => UnzipEven(left, right); + + + /// UnzipOdd : Concatenate odd elements from two inputs + + /// + /// svbfloat16_t svuzp2[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) => UnzipOdd(left, right); + + + /// VectorTableLookup : Table lookup in single-vector table + + /// + /// svbfloat16_t svtbl[_bf16](svbfloat16_t
data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) => VectorTableLookup(data, indices); + + /// + /// svbfloat16_t svtbl2[_bf16](svbfloat16x2_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) data, Vector indices) => VectorTableLookup(data, indices); + + + /// VectorTableLookupExtension : Table lookup in single-vector table (merging) + + /// + /// svbfloat16_t svtbx[_bf16](svbfloat16_t fallback, svbfloat16_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + + /// ZipHigh : Interleave elements from high halves of two inputs + + /// + /// svbfloat16_t svzip2[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector ZipHigh(Vector left, Vector right) => ZipHigh(left, right); + + + /// ZipLow : Interleave elements from low halves of two inputs + + /// + /// svbfloat16_t svzip1[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// + public static unsafe Vector ZipLow(Vector left, Vector right) => ZipLow(left, right); + + } +} + diff --git a/sve_api/out_cs_api/SveBitperm.PlatformNotSupported.cs b/sve_api/out_cs_api/SveBitperm.PlatformNotSupported.cs new file mode 100644 index 0000000000000..437699fbc0b82 --- /dev/null +++ b/sve_api/out_cs_api/SveBitperm.PlatformNotSupported.cs @@ -0,0 +1,100 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveBitperm : AdvSimd + { + internal SveBitperm() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// GatherLowerBitsFromPositionsSelectedByBitmask : Gather lower bits from positions selected by bitmask + + /// + /// svuint8_t svbext[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svbext[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svbext[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svbext[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// GroupBitsToRightOrLeftAsSelectedByBitmask : Group bits to right or left as selected by bitmask + + /// + /// svuint8_t svbgrp[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right) { throw new
PlatformNotSupportedException(); } + + /// + /// svuint16_t svbgrp[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svbgrp[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svbgrp[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// ScatterLowerBitsIntoPositionsSelectedByBitmask : Scatter lower bits into positions selected by bitmask + + /// + /// svuint8_t svbdep[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svbdep[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svbdep[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svbdep[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + } +} + diff --git a/sve_api/out_cs_api/SveBitperm.System.Runtime.Intrinsics.cs b/sve_api/out_cs_api/SveBitperm.System.Runtime.Intrinsics.cs new file mode 100644 index 0000000000000..ae2c56bf44b87 --- /dev/null +++ b/sve_api/out_cs_api/SveBitperm.System.Runtime.Intrinsics.cs @@ -0,0 +1,13 @@ + public static System.Numerics.Vector GatherLowerBitsFromPositionsSelectedByBitmask(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector GatherLowerBitsFromPositionsSelectedByBitmask(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector GatherLowerBitsFromPositionsSelectedByBitmask(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector GatherLowerBitsFromPositionsSelectedByBitmask(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector GroupBitsToRightOrLeftAsSelectedByBitmask(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector GroupBitsToRightOrLeftAsSelectedByBitmask(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector GroupBitsToRightOrLeftAsSelectedByBitmask(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector GroupBitsToRightOrLeftAsSelectedByBitmask(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(System.Numerics.Vector left, 
System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + diff --git a/sve_api/out_cs_api/SveBitperm.cs b/sve_api/out_cs_api/SveBitperm.cs new file mode 100644 index 0000000000000..c352b6de2d824 --- /dev/null +++ b/sve_api/out_cs_api/SveBitperm.cs @@ -0,0 +1,100 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveBitperm : AdvSimd + { + internal SveBitperm() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// GatherLowerBitsFromPositionsSelectedByBitmask : Gather lower bits from positions selected by bitmask + + /// + /// svuint8_t svbext[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right) => GatherLowerBitsFromPositionsSelectedByBitmask(left, right); + + /// + /// svuint16_t svbext[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right) => GatherLowerBitsFromPositionsSelectedByBitmask(left, right); + + /// + /// svuint32_t svbext[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right) => GatherLowerBitsFromPositionsSelectedByBitmask(left, right); + + /// + /// svuint64_t svbext[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right) => GatherLowerBitsFromPositionsSelectedByBitmask(left, right); + + + /// GroupBitsToRightOrLeftAsSelectedByBitmask : Group bits to right or left as selected by bitmask + + /// + /// svuint8_t svbgrp[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right) => GroupBitsToRightOrLeftAsSelectedByBitmask(left, right); + + /// + /// svuint16_t svbgrp[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right) => GroupBitsToRightOrLeftAsSelectedByBitmask(left, right); + + /// + /// svuint32_t svbgrp[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right) => GroupBitsToRightOrLeftAsSelectedByBitmask(left, right); + + /// + /// svuint64_t svbgrp[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right) => GroupBitsToRightOrLeftAsSelectedByBitmask(left, right); + + + /// ScatterLowerBitsIntoPositionsSelectedByBitmask : Scatter lower bits into positions selected by 
bitmask + + /// + /// svuint8_t svbdep[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right) => ScatterLowerBitsIntoPositionsSelectedByBitmask(left, right); + + /// + /// svuint16_t svbdep[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right) => ScatterLowerBitsIntoPositionsSelectedByBitmask(left, right); + + /// + /// svuint32_t svbdep[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right) => ScatterLowerBitsIntoPositionsSelectedByBitmask(left, right); + + /// + /// svuint64_t svbdep[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right) => ScatterLowerBitsIntoPositionsSelectedByBitmask(left, right); + + } +} + diff --git a/sve_api/out_cs_api/SveF32mm.PlatformNotSupported.cs b/sve_api/out_cs_api/SveF32mm.PlatformNotSupported.cs new file mode 100644 index 0000000000000..65b5d59a73416 --- /dev/null +++ b/sve_api/out_cs_api/SveF32mm.PlatformNotSupported.cs @@ -0,0 +1,39 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveF32mm : AdvSimd + { + internal SveF32mm() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// MatrixMultiplyAccumulate : Matrix multiply-accumulate + + /// + /// svfloat32_t svmmla[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + } +} + diff --git a/sve_api/out_cs_api/SveF32mm.System.Runtime.Intrinsics.cs b/sve_api/out_cs_api/SveF32mm.System.Runtime.Intrinsics.cs new file mode 100644 index 0000000000000..489229df2c5d5 --- /dev/null +++ b/sve_api/out_cs_api/SveF32mm.System.Runtime.Intrinsics.cs @@ -0,0 +1,2 @@ + public static System.Numerics.Vector MatrixMultiplyAccumulate(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + diff --git a/sve_api/out_cs_api/SveF32mm.cs b/sve_api/out_cs_api/SveF32mm.cs new file mode 100644 index 0000000000000..c28722bcd0c5a --- /dev/null +++ b/sve_api/out_cs_api/SveF32mm.cs @@ -0,0 +1,39 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
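// Note: the SveBitperm entry points above surface the SVE2 BEXT/BDEP/BGRP
// bit-permute instructions, which apply the classic bit extract/deposit
// operations to every vector element independently. A minimal scalar sketch
// of the per-element semantics for a single 64-bit element follows; the
// helper names are illustrative only and are not part of the generated API.

static ulong BitExtract(ulong value, ulong mask)
{
    // svbext: collect the bits of 'value' at positions where 'mask' is set
    // into the contiguous low bits of the result.
    ulong result = 0;
    int outBit = 0;
    for (int i = 0; i < 64; i++)
        if (((mask >> i) & 1UL) != 0)
            result |= ((value >> i) & 1UL) << outBit++;
    return result;
}

static ulong BitDeposit(ulong value, ulong mask)
{
    // svbdep: spread the contiguous low bits of 'value' out to the positions
    // where 'mask' is set; all other result bits are zero.
    ulong result = 0;
    int inBit = 0;
    for (int i = 0; i < 64; i++)
        if (((mask >> i) & 1UL) != 0)
            result |= ((value >> inBit++) & 1UL) << i;
    return result;
}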
+ +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveF32mm : AdvSimd + { + internal SveF32mm() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// MatrixMultiplyAccumulate : Matrix multiply-accumulate + + /// + /// svfloat32_t svmmla[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3) => MatrixMultiplyAccumulate(op1, op2, op3); + + } +} + diff --git a/sve_api/out_cs_api/SveF64mm.PlatformNotSupported.cs b/sve_api/out_cs_api/SveF64mm.PlatformNotSupported.cs new file mode 100644 index 0000000000000..3845d42a599c4 --- /dev/null +++ b/sve_api/out_cs_api/SveF64mm.PlatformNotSupported.cs @@ -0,0 +1,410 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveF64mm : AdvSimd + { + internal SveF64mm() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// ConcatenateEvenInt128FromTwoInputs : Concatenate even quadwords from two inputs + + /// + /// svint8_t svuzp1q[_s8](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svuzp1q[_s16](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svuzp1q[_s32](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svuzp1q[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svuzp1q[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svuzp1q[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svuzp1q[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t 
svuzp1q[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svuzp1q[_f32](svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svuzp1q[_f64](svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// ConcatenateOddInt128FromTwoInputs : Concatenate odd quadwords from two inputs + + /// + /// svint8_t svuzp2q[_s8](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svuzp2q[_s16](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svuzp2q[_s32](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svuzp2q[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svuzp2q[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svuzp2q[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svuzp2q[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svuzp2q[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svuzp2q[_f32](svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svuzp2q[_f64](svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// InterleaveEvenInt128FromTwoInputs : Interleave even quadwords from two inputs + + /// + /// svint8_t svtrn1q[_s8](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svtrn1q[_s16](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svtrn1q[_s32](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) { throw new 
PlatformNotSupportedException(); } + + /// + /// svint64_t svtrn1q[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svtrn1q[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svtrn1q[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svtrn1q[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svtrn1q[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svtrn1q[_f32](svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svtrn1q[_f64](svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// InterleaveInt128FromHighHalvesOfTwoInputs : Interleave quadwords from high halves of two inputs + + /// + /// svint8_t svzip2q[_s8](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svzip2q[_s16](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svzip2q[_s32](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svzip2q[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svzip2q[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svzip2q[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svzip2q[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svzip2q[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svzip2q[_f32](svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector 
InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svzip2q[_f64](svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// InterleaveInt128FromLowHalvesOfTwoInputs : Interleave quadwords from low halves of two inputs + + /// + /// svint8_t svzip1q[_s8](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svzip1q[_s16](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svzip1q[_s32](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svzip1q[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svzip1q[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svzip1q[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svzip1q[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svzip1q[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svzip1q[_f32](svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svzip1q[_f64](svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// InterleaveOddInt128FromTwoInputs : Interleave odd quadwords from two inputs + + /// + /// svint8_t svtrn2q[_s8](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svtrn2q[_s16](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svtrn2q[_s32](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svtrn2q[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) { throw new 
PlatformNotSupportedException(); } + + /// + /// svuint8_t svtrn2q[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svtrn2q[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svtrn2q[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svtrn2q[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svtrn2q[_f32](svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svtrn2q[_f64](svfloat64_t op1, svfloat64_t op2) + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// LoadVector256AndReplicateToVector : Load and replicate 256 bits of data + + /// + /// svint8_t svld1ro[_s8](svbool_t pg, const int8_t *base) + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, sbyte* address) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svld1ro[_s16](svbool_t pg, const int16_t *base) + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, short* address) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svld1ro[_s32](svbool_t pg, const int32_t *base) + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, int* address) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svld1ro[_s64](svbool_t pg, const int64_t *base) + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, long* address) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svld1ro[_u8](svbool_t pg, const uint8_t *base) + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, byte* address) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svld1ro[_u16](svbool_t pg, const uint16_t *base) + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, ushort* address) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svld1ro[_u32](svbool_t pg, const uint32_t *base) + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, uint* address) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svld1ro[_u64](svbool_t pg, const uint64_t *base) + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, ulong* address) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svld1ro[_f32](svbool_t pg, const float32_t *base) + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, float* address) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svld1ro[_f64](svbool_t pg, const float64_t *base) + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, double* address) { throw new 
PlatformNotSupportedException(); } + + + /// MatrixMultiplyAccumulate : Matrix multiply-accumulate + + /// + /// svfloat64_t svmmla[_f64](svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + } +} + diff --git a/sve_api/out_cs_api/SveF64mm.System.Runtime.Intrinsics.cs b/sve_api/out_cs_api/SveF64mm.System.Runtime.Intrinsics.cs new file mode 100644 index 0000000000000..de1313de8d0cd --- /dev/null +++ b/sve_api/out_cs_api/SveF64mm.System.Runtime.Intrinsics.cs @@ -0,0 +1,72 @@ + public static System.Numerics.Vector ConcatenateEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateOddInt128FromTwoInputs(System.Numerics.Vector left, 
System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromHighHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromHighHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromHighHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromHighHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromHighHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromHighHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromHighHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromHighHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromHighHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromHighHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromLowHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromLowHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromLowHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + 
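// Note: the Int128-suffixed permutes stubbed here (svuzp1q/svuzp2q, svtrn1q/
// svtrn2q, svzip1q/svzip2q) operate on 128-bit granules of the scalable vector
// rather than on individual element lanes. As a rough model,
// ConcatenateEvenInt128FromTwoInputs (svuzp1q) keeps the even-numbered 128-bit
// chunks of the first input followed by those of the second. An illustrative
// scalar sketch over arrays of chunks, where 'Chunk' is a placeholder type for
// one 128-bit granule and not part of the API:

static Chunk[] ConcatenateEvenChunks(Chunk[] left, Chunk[] right)
{
    var result = new Chunk[left.Length];
    int half = left.Length / 2;
    for (int i = 0; i < half; i++)
        result[i] = left[2 * i];         // even-numbered chunks of the first input
    for (int i = 0; i < half; i++)
        result[half + i] = right[2 * i]; // then even-numbered chunks of the second
    return result;
}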
public static System.Numerics.Vector InterleaveInt128FromLowHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromLowHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromLowHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromLowHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromLowHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromLowHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromLowHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static unsafe System.Numerics.Vector LoadVector256AndReplicateToVector(System.Numerics.Vector mask, sbyte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector256AndReplicateToVector(System.Numerics.Vector mask, short* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector256AndReplicateToVector(System.Numerics.Vector mask, int* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector256AndReplicateToVector(System.Numerics.Vector mask, long* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector256AndReplicateToVector(System.Numerics.Vector mask, byte* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector256AndReplicateToVector(System.Numerics.Vector mask, ushort* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector256AndReplicateToVector(System.Numerics.Vector mask, uint* address) { throw null; } + public static 
unsafe System.Numerics.Vector LoadVector256AndReplicateToVector(System.Numerics.Vector mask, ulong* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector256AndReplicateToVector(System.Numerics.Vector mask, float* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector256AndReplicateToVector(System.Numerics.Vector mask, double* address) { throw null; } + public static System.Numerics.Vector MatrixMultiplyAccumulate(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + diff --git a/sve_api/out_cs_api/SveF64mm.cs b/sve_api/out_cs_api/SveF64mm.cs new file mode 100644 index 0000000000000..a4bbe12d6d34c --- /dev/null +++ b/sve_api/out_cs_api/SveF64mm.cs @@ -0,0 +1,410 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveF64mm : AdvSimd + { + internal SveF64mm() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// ConcatenateEvenInt128FromTwoInputs : Concatenate even quadwords from two inputs + + /// + /// svint8_t svuzp1q[_s8](svint8_t op1, svint8_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svint16_t svuzp1q[_s16](svint16_t op1, svint16_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svint32_t svuzp1q[_s32](svint32_t op1, svint32_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svint64_t svuzp1q[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svuint8_t svuzp1q[_u8](svuint8_t op1, svuint8_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svuint16_t svuzp1q[_u16](svuint16_t op1, svuint16_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svuint32_t svuzp1q[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svuint64_t svuzp1q[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svfloat32_t svuzp1q[_f32](svfloat32_t op1, svfloat32_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => 
diff --git a/sve_api/out_cs_api/SveF64mm.cs b/sve_api/out_cs_api/SveF64mm.cs
new file mode 100644
index 0000000000000..a4bbe12d6d34c
--- /dev/null
+++ b/sve_api/out_cs_api/SveF64mm.cs
@@ -0,0 +1,410 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics.CodeAnalysis;
+using System.Runtime.CompilerServices;
+using System.Runtime.Intrinsics;
+using System.Numerics;
+
+namespace System.Runtime.Intrinsics.Arm
+{
+    /// <summary>
+    /// This class provides access to the ARM SVE hardware instructions via intrinsics
+    /// </summary>
+    [Intrinsic]
+    [CLSCompliant(false)]
+    public abstract class SveF64mm : AdvSimd
+    {
+        internal SveF64mm() { }
+
+        public static new bool IsSupported { get => IsSupported; }
+
+        [Intrinsic]
+        public new abstract class Arm64 : AdvSimd.Arm64
+        {
+            internal Arm64() { }
+
+            public static new bool IsSupported { get => IsSupported; }
+        }
+
+        /// ConcatenateEvenInt128FromTwoInputs : Concatenate even quadwords from two inputs
+        /// <summary>svint8_t svuzp1q[_s8](svint8_t op1, svint8_t op2)</summary>
+        public static unsafe Vector<sbyte> ConcatenateEvenInt128FromTwoInputs(Vector<sbyte> left, Vector<sbyte> right) => ConcatenateEvenInt128FromTwoInputs(left, right);
+        /// <summary>svint16_t svuzp1q[_s16](svint16_t op1, svint16_t op2)</summary>
+        public static unsafe Vector<short> ConcatenateEvenInt128FromTwoInputs(Vector<short> left, Vector<short> right) => ConcatenateEvenInt128FromTwoInputs(left, right);
+        /// <summary>svint32_t svuzp1q[_s32](svint32_t op1, svint32_t op2)</summary>
+        public static unsafe Vector<int> ConcatenateEvenInt128FromTwoInputs(Vector<int> left, Vector<int> right) => ConcatenateEvenInt128FromTwoInputs(left, right);
+        /// <summary>svint64_t svuzp1q[_s64](svint64_t op1, svint64_t op2)</summary>
+        public static unsafe Vector<long> ConcatenateEvenInt128FromTwoInputs(Vector<long> left, Vector<long> right) => ConcatenateEvenInt128FromTwoInputs(left, right);
+        /// <summary>svuint8_t svuzp1q[_u8](svuint8_t op1, svuint8_t op2)</summary>
+        public static unsafe Vector<byte> ConcatenateEvenInt128FromTwoInputs(Vector<byte> left, Vector<byte> right) => ConcatenateEvenInt128FromTwoInputs(left, right);
+        /// <summary>svuint16_t svuzp1q[_u16](svuint16_t op1, svuint16_t op2)</summary>
+        public static unsafe Vector<ushort> ConcatenateEvenInt128FromTwoInputs(Vector<ushort> left, Vector<ushort> right) => ConcatenateEvenInt128FromTwoInputs(left, right);
+        /// <summary>svuint32_t svuzp1q[_u32](svuint32_t op1, svuint32_t op2)</summary>
+        public static unsafe Vector<uint> ConcatenateEvenInt128FromTwoInputs(Vector<uint> left, Vector<uint> right) => ConcatenateEvenInt128FromTwoInputs(left, right);
+        /// <summary>svuint64_t svuzp1q[_u64](svuint64_t op1, svuint64_t op2)</summary>
+        public static unsafe Vector<ulong> ConcatenateEvenInt128FromTwoInputs(Vector<ulong> left, Vector<ulong> right) => ConcatenateEvenInt128FromTwoInputs(left, right);
+        /// <summary>svfloat32_t svuzp1q[_f32](svfloat32_t op1, svfloat32_t op2)</summary>
+        public static unsafe Vector<float> ConcatenateEvenInt128FromTwoInputs(Vector<float> left, Vector<float> right) => ConcatenateEvenInt128FromTwoInputs(left, right);
+        /// <summary>svfloat64_t svuzp1q[_f64](svfloat64_t op1, svfloat64_t op2)</summary>
+        public static unsafe Vector<double> ConcatenateEvenInt128FromTwoInputs(Vector<double> left, Vector<double> right) => ConcatenateEvenInt128FromTwoInputs(left, right);
+
+        /// ConcatenateOddInt128FromTwoInputs : Concatenate odd quadwords from two inputs
+        /// <summary>svint8_t svuzp2q[_s8](svint8_t op1, svint8_t op2)</summary>
+        public static unsafe Vector<sbyte> ConcatenateOddInt128FromTwoInputs(Vector<sbyte> left, Vector<sbyte> right) => ConcatenateOddInt128FromTwoInputs(left, right);
+        /// <summary>svint16_t svuzp2q[_s16](svint16_t op1, svint16_t op2)</summary>
+        public static unsafe Vector<short> ConcatenateOddInt128FromTwoInputs(Vector<short> left, Vector<short> right) => ConcatenateOddInt128FromTwoInputs(left, right);
+        /// <summary>svint32_t svuzp2q[_s32](svint32_t op1, svint32_t op2)</summary>
+        public static unsafe Vector<int> ConcatenateOddInt128FromTwoInputs(Vector<int> left, Vector<int> right) => ConcatenateOddInt128FromTwoInputs(left, right);
+        /// <summary>svint64_t svuzp2q[_s64](svint64_t op1, svint64_t op2)</summary>
+        public static unsafe Vector<long> ConcatenateOddInt128FromTwoInputs(Vector<long> left, Vector<long> right) => ConcatenateOddInt128FromTwoInputs(left, right);
+        /// <summary>svuint8_t svuzp2q[_u8](svuint8_t op1, svuint8_t op2)</summary>
+        public static unsafe Vector<byte> ConcatenateOddInt128FromTwoInputs(Vector<byte> left, Vector<byte> right) => ConcatenateOddInt128FromTwoInputs(left, right);
+        /// <summary>svuint16_t svuzp2q[_u16](svuint16_t op1, svuint16_t op2)</summary>
+        public static unsafe Vector<ushort> ConcatenateOddInt128FromTwoInputs(Vector<ushort> left, Vector<ushort> right) => ConcatenateOddInt128FromTwoInputs(left, right);
+        /// <summary>svuint32_t svuzp2q[_u32](svuint32_t op1, svuint32_t op2)</summary>
+        public static unsafe Vector<uint> ConcatenateOddInt128FromTwoInputs(Vector<uint> left, Vector<uint> right) => ConcatenateOddInt128FromTwoInputs(left, right);
+        /// <summary>svuint64_t svuzp2q[_u64](svuint64_t op1, svuint64_t op2)</summary>
+        public static unsafe Vector<ulong> ConcatenateOddInt128FromTwoInputs(Vector<ulong> left, Vector<ulong> right) => ConcatenateOddInt128FromTwoInputs(left, right);
+        /// <summary>svfloat32_t svuzp2q[_f32](svfloat32_t op1, svfloat32_t op2)</summary>
+        public static unsafe Vector<float> ConcatenateOddInt128FromTwoInputs(Vector<float> left, Vector<float> right) => ConcatenateOddInt128FromTwoInputs(left, right);
+        /// <summary>svfloat64_t svuzp2q[_f64](svfloat64_t op1, svfloat64_t op2)</summary>
+        public static unsafe Vector<double> ConcatenateOddInt128FromTwoInputs(Vector<double> left, Vector<double> right) => ConcatenateOddInt128FromTwoInputs(left, right);
+
+        /// InterleaveEvenInt128FromTwoInputs : Interleave even quadwords from two inputs
+        /// <summary>svint8_t svtrn1q[_s8](svint8_t op1, svint8_t op2)</summary>
+        public static unsafe Vector<sbyte> InterleaveEvenInt128FromTwoInputs(Vector<sbyte> left, Vector<sbyte> right) => InterleaveEvenInt128FromTwoInputs(left, right);
+        /// <summary>svint16_t svtrn1q[_s16](svint16_t op1, svint16_t op2)</summary>
+        public static unsafe Vector<short> InterleaveEvenInt128FromTwoInputs(Vector<short> left, Vector<short> right) => InterleaveEvenInt128FromTwoInputs(left, right);
+        /// <summary>svint32_t svtrn1q[_s32](svint32_t op1, svint32_t op2)</summary>
+        public static unsafe Vector<int> InterleaveEvenInt128FromTwoInputs(Vector<int> left, Vector<int> right) => InterleaveEvenInt128FromTwoInputs(left, right);
+        /// <summary>svint64_t svtrn1q[_s64](svint64_t op1, svint64_t op2)</summary>
+        public static unsafe Vector<long> InterleaveEvenInt128FromTwoInputs(Vector<long> left, Vector<long> right) => InterleaveEvenInt128FromTwoInputs(left, right);
+        /// <summary>svuint8_t svtrn1q[_u8](svuint8_t op1, svuint8_t op2)</summary>
+        public static unsafe Vector<byte> InterleaveEvenInt128FromTwoInputs(Vector<byte> left, Vector<byte> right) => InterleaveEvenInt128FromTwoInputs(left, right);
+        /// <summary>svuint16_t svtrn1q[_u16](svuint16_t op1, svuint16_t op2)</summary>
+        public static unsafe Vector<ushort> InterleaveEvenInt128FromTwoInputs(Vector<ushort> left, Vector<ushort> right) => InterleaveEvenInt128FromTwoInputs(left, right);
+        /// <summary>svuint32_t svtrn1q[_u32](svuint32_t op1, svuint32_t op2)</summary>
+        public static unsafe Vector<uint> InterleaveEvenInt128FromTwoInputs(Vector<uint> left, Vector<uint> right) => InterleaveEvenInt128FromTwoInputs(left, right);
+        /// <summary>svuint64_t svtrn1q[_u64](svuint64_t op1, svuint64_t op2)</summary>
+        public static unsafe Vector<ulong> InterleaveEvenInt128FromTwoInputs(Vector<ulong> left, Vector<ulong> right) => InterleaveEvenInt128FromTwoInputs(left, right);
+        /// <summary>svfloat32_t svtrn1q[_f32](svfloat32_t op1, svfloat32_t op2)</summary>
+        public static unsafe Vector<float> InterleaveEvenInt128FromTwoInputs(Vector<float> left, Vector<float> right) => InterleaveEvenInt128FromTwoInputs(left, right);
+        /// <summary>svfloat64_t svtrn1q[_f64](svfloat64_t op1, svfloat64_t op2)</summary>
+        public static unsafe Vector<double> InterleaveEvenInt128FromTwoInputs(Vector<double> left, Vector<double> right) => InterleaveEvenInt128FromTwoInputs(left, right);
+
+        /// InterleaveInt128FromHighHalvesOfTwoInputs : Interleave quadwords from high halves of two inputs
+        /// <summary>svint8_t svzip2q[_s8](svint8_t op1, svint8_t op2)</summary>
+        public static unsafe Vector<sbyte> InterleaveInt128FromHighHalvesOfTwoInputs(Vector<sbyte> left, Vector<sbyte> right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right);
+        /// <summary>svint16_t svzip2q[_s16](svint16_t op1, svint16_t op2)</summary>
+        public static unsafe Vector<short> InterleaveInt128FromHighHalvesOfTwoInputs(Vector<short> left, Vector<short> right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right);
+        /// <summary>svint32_t svzip2q[_s32](svint32_t op1, svint32_t op2)</summary>
+        public static unsafe Vector<int> InterleaveInt128FromHighHalvesOfTwoInputs(Vector<int> left, Vector<int> right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right);
+        /// <summary>svint64_t svzip2q[_s64](svint64_t op1, svint64_t op2)</summary>
+        public static unsafe Vector<long> InterleaveInt128FromHighHalvesOfTwoInputs(Vector<long> left, Vector<long> right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right);
+        /// <summary>svuint8_t svzip2q[_u8](svuint8_t op1, svuint8_t op2)</summary>
+        public static unsafe Vector<byte> InterleaveInt128FromHighHalvesOfTwoInputs(Vector<byte> left, Vector<byte> right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right);
+        /// <summary>svuint16_t svzip2q[_u16](svuint16_t op1, svuint16_t op2)</summary>
+        public static unsafe Vector<ushort> InterleaveInt128FromHighHalvesOfTwoInputs(Vector<ushort> left, Vector<ushort> right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right);
+        /// <summary>svuint32_t svzip2q[_u32](svuint32_t op1, svuint32_t op2)</summary>
+        public static unsafe Vector<uint> InterleaveInt128FromHighHalvesOfTwoInputs(Vector<uint> left, Vector<uint> right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right);
+        /// <summary>svuint64_t svzip2q[_u64](svuint64_t op1, svuint64_t op2)</summary>
+        public static unsafe Vector<ulong> InterleaveInt128FromHighHalvesOfTwoInputs(Vector<ulong> left, Vector<ulong> right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right);
+        /// <summary>svfloat32_t svzip2q[_f32](svfloat32_t op1, svfloat32_t op2)</summary>
+        public static unsafe Vector<float> InterleaveInt128FromHighHalvesOfTwoInputs(Vector<float> left, Vector<float> right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right);
+        /// <summary>svfloat64_t svzip2q[_f64](svfloat64_t op1, svfloat64_t op2)</summary>
+        public static unsafe Vector<double> InterleaveInt128FromHighHalvesOfTwoInputs(Vector<double> left, Vector<double> right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right);
+
+        /// InterleaveInt128FromLowHalvesOfTwoInputs : Interleave quadwords from low halves of two inputs
+        /// <summary>svint8_t svzip1q[_s8](svint8_t op1, svint8_t op2)</summary>
+        public static unsafe Vector<sbyte> InterleaveInt128FromLowHalvesOfTwoInputs(Vector<sbyte> left, Vector<sbyte> right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right);
+        /// <summary>svint16_t svzip1q[_s16](svint16_t op1, svint16_t op2)</summary>
+        public static unsafe Vector<short> InterleaveInt128FromLowHalvesOfTwoInputs(Vector<short> left, Vector<short> right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right);
+        /// <summary>svint32_t svzip1q[_s32](svint32_t op1, svint32_t op2)</summary>
+        public static unsafe Vector<int> InterleaveInt128FromLowHalvesOfTwoInputs(Vector<int> left, Vector<int> right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right);
+        /// <summary>svint64_t svzip1q[_s64](svint64_t op1, svint64_t op2)</summary>
+        public static unsafe Vector<long> InterleaveInt128FromLowHalvesOfTwoInputs(Vector<long> left, Vector<long> right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right);
+        /// <summary>svuint8_t svzip1q[_u8](svuint8_t op1, svuint8_t op2)</summary>
+        public static unsafe Vector<byte> InterleaveInt128FromLowHalvesOfTwoInputs(Vector<byte> left, Vector<byte> right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right);
+        /// <summary>svuint16_t svzip1q[_u16](svuint16_t op1, svuint16_t op2)</summary>
+        public static unsafe Vector<ushort> InterleaveInt128FromLowHalvesOfTwoInputs(Vector<ushort> left, Vector<ushort> right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right);
+        /// <summary>svuint32_t svzip1q[_u32](svuint32_t op1, svuint32_t op2)</summary>
+        public static unsafe Vector<uint> InterleaveInt128FromLowHalvesOfTwoInputs(Vector<uint> left, Vector<uint> right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right);
+        /// <summary>svuint64_t svzip1q[_u64](svuint64_t op1, svuint64_t op2)</summary>
+        public static unsafe Vector<ulong> InterleaveInt128FromLowHalvesOfTwoInputs(Vector<ulong> left, Vector<ulong> right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right);
+        /// <summary>svfloat32_t svzip1q[_f32](svfloat32_t op1, svfloat32_t op2)</summary>
+        public static unsafe Vector<float> InterleaveInt128FromLowHalvesOfTwoInputs(Vector<float> left, Vector<float> right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right);
+        /// <summary>svfloat64_t svzip1q[_f64](svfloat64_t op1, svfloat64_t op2)</summary>
+        public static unsafe Vector<double> InterleaveInt128FromLowHalvesOfTwoInputs(Vector<double> left, Vector<double> right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right);
+
+        /// InterleaveOddInt128FromTwoInputs : Interleave odd quadwords from two inputs
+        /// <summary>svint8_t svtrn2q[_s8](svint8_t op1, svint8_t op2)</summary>
+        public static unsafe Vector<sbyte> InterleaveOddInt128FromTwoInputs(Vector<sbyte> left, Vector<sbyte> right) => InterleaveOddInt128FromTwoInputs(left, right);
+        /// <summary>svint16_t svtrn2q[_s16](svint16_t op1, svint16_t op2)</summary>
+        public static unsafe Vector<short> InterleaveOddInt128FromTwoInputs(Vector<short> left, Vector<short> right) => InterleaveOddInt128FromTwoInputs(left, right);
+        /// <summary>svint32_t svtrn2q[_s32](svint32_t op1, svint32_t op2)</summary>
+        public static unsafe Vector<int> InterleaveOddInt128FromTwoInputs(Vector<int> left, Vector<int> right) => InterleaveOddInt128FromTwoInputs(left, right);
+        /// <summary>svint64_t svtrn2q[_s64](svint64_t op1, svint64_t op2)</summary>
+        public static unsafe Vector<long> InterleaveOddInt128FromTwoInputs(Vector<long> left, Vector<long> right) => InterleaveOddInt128FromTwoInputs(left, right);
+        /// <summary>svuint8_t svtrn2q[_u8](svuint8_t op1, svuint8_t op2)</summary>
+        public static unsafe Vector<byte> InterleaveOddInt128FromTwoInputs(Vector<byte> left, Vector<byte> right) => InterleaveOddInt128FromTwoInputs(left, right);
+        /// <summary>svuint16_t svtrn2q[_u16](svuint16_t op1, svuint16_t op2)</summary>
+        public static unsafe Vector<ushort> InterleaveOddInt128FromTwoInputs(Vector<ushort> left, Vector<ushort> right) => InterleaveOddInt128FromTwoInputs(left, right);
+        /// <summary>svuint32_t svtrn2q[_u32](svuint32_t op1, svuint32_t op2)</summary>
+        public static unsafe Vector<uint> InterleaveOddInt128FromTwoInputs(Vector<uint> left, Vector<uint> right) => InterleaveOddInt128FromTwoInputs(left, right);
+        /// <summary>svuint64_t svtrn2q[_u64](svuint64_t op1, svuint64_t op2)</summary>
+        public static unsafe Vector<ulong> InterleaveOddInt128FromTwoInputs(Vector<ulong> left, Vector<ulong> right) => InterleaveOddInt128FromTwoInputs(left, right);
+        /// <summary>svfloat32_t svtrn2q[_f32](svfloat32_t op1, svfloat32_t op2)</summary>
+        public static unsafe Vector<float> InterleaveOddInt128FromTwoInputs(Vector<float> left, Vector<float> right) => InterleaveOddInt128FromTwoInputs(left, right);
+        /// <summary>svfloat64_t svtrn2q[_f64](svfloat64_t op1, svfloat64_t op2)</summary>
+        public static unsafe Vector<double> InterleaveOddInt128FromTwoInputs(Vector<double> left, Vector<double> right) => InterleaveOddInt128FromTwoInputs(left, right);
+
+        /// LoadVector256AndReplicateToVector : Load and replicate 256 bits of data
+        /// <summary>svint8_t svld1ro[_s8](svbool_t pg, const int8_t *base)</summary>
+        public static unsafe Vector<sbyte> LoadVector256AndReplicateToVector(Vector<sbyte> mask, sbyte* address) => LoadVector256AndReplicateToVector(mask, address);
+        /// <summary>svint16_t svld1ro[_s16](svbool_t pg, const int16_t *base)</summary>
+        public static unsafe Vector<short> LoadVector256AndReplicateToVector(Vector<short> mask, short* address) => LoadVector256AndReplicateToVector(mask, address);
+        /// <summary>svint32_t svld1ro[_s32](svbool_t pg, const int32_t *base)</summary>
+        public static unsafe Vector<int> LoadVector256AndReplicateToVector(Vector<int> mask, int* address) => LoadVector256AndReplicateToVector(mask, address);
+        /// <summary>svint64_t svld1ro[_s64](svbool_t pg, const int64_t *base)</summary>
+        public static unsafe Vector<long> LoadVector256AndReplicateToVector(Vector<long> mask, long* address) => LoadVector256AndReplicateToVector(mask, address);
+        /// <summary>svuint8_t svld1ro[_u8](svbool_t pg, const uint8_t *base)</summary>
+        public static unsafe Vector<byte> LoadVector256AndReplicateToVector(Vector<byte> mask, byte* address) => LoadVector256AndReplicateToVector(mask, address);
+        /// <summary>svuint16_t svld1ro[_u16](svbool_t pg, const uint16_t *base)</summary>
+        public static unsafe Vector<ushort> LoadVector256AndReplicateToVector(Vector<ushort> mask, ushort* address) => LoadVector256AndReplicateToVector(mask, address);
+        /// <summary>svuint32_t svld1ro[_u32](svbool_t pg, const uint32_t *base)</summary>
+        public static unsafe Vector<uint> LoadVector256AndReplicateToVector(Vector<uint> mask, uint* address) => LoadVector256AndReplicateToVector(mask, address);
+        /// <summary>svuint64_t svld1ro[_u64](svbool_t pg, const uint64_t *base)</summary>
+        public static unsafe Vector<ulong> LoadVector256AndReplicateToVector(Vector<ulong> mask, ulong* address) => LoadVector256AndReplicateToVector(mask, address);
+        /// <summary>svfloat32_t svld1ro[_f32](svbool_t pg, const float32_t *base)</summary>
+        public static unsafe Vector<float> LoadVector256AndReplicateToVector(Vector<float> mask, float* address) => LoadVector256AndReplicateToVector(mask, address);
+        /// <summary>svfloat64_t svld1ro[_f64](svbool_t pg, const float64_t *base)</summary>
+        public static unsafe Vector<double> LoadVector256AndReplicateToVector(Vector<double> mask, double* address) => LoadVector256AndReplicateToVector(mask, address);
+
+        /// MatrixMultiplyAccumulate : Matrix multiply-accumulate
+        /// <summary>svfloat64_t svmmla[_f64](svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)</summary>
+        public static unsafe Vector<double> MatrixMultiplyAccumulate(Vector<double> op1, Vector<double> op2, Vector<double> op3) => MatrixMultiplyAccumulate(op1, op2, op3);
+
+    }
+}
+
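The next file is the PlatformNotSupported counterpart for SveFp16: every entry point unconditionally throws, and the JIT only binds the real instruction when the ISA check passes, so callers are expected to branch on IsSupported exactly once. A guard-shaped sketch under that assumption (the fallback branch is deliberately left as a throw; `half` is the placeholder element-type spelling these generated files use, not a shipping C# type):

    // Sketch: inside the guarded branch the JIT emits the SVE FP16 FADD;
    // outside it, the stub below would raise PlatformNotSupportedException anyway.
    static Vector<half> GuardedAdd(Vector<half> x, Vector<half> y)
        => SveFp16.IsSupported
            ? SveFp16.Add(x, y)
            : throw new PlatformNotSupportedException(); // real code would dispatch to AdvSimd/scalar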
diff --git a/sve_api/out_cs_api/SveFp16.PlatformNotSupported.cs b/sve_api/out_cs_api/SveFp16.PlatformNotSupported.cs
new file mode 100644
index 0000000000000..15d80dfbc492e
--- /dev/null
+++ b/sve_api/out_cs_api/SveFp16.PlatformNotSupported.cs
@@ -0,0 +1,1195 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics.CodeAnalysis;
+using System.Runtime.CompilerServices;
+using System.Runtime.Intrinsics;
+using System.Numerics;
+
+namespace System.Runtime.Intrinsics.Arm
+{
+    /// <summary>
+    /// This class provides access to the ARM SVE hardware instructions via intrinsics
+    /// </summary>
+    [Intrinsic]
+    [CLSCompliant(false)]
+    public abstract class SveFp16 : AdvSimd
+    {
+        internal SveFp16() { }
+
+        public static new bool IsSupported { get => IsSupported; }
+
+        [Intrinsic]
+        public new abstract class Arm64 : AdvSimd.Arm64
+        {
+            internal Arm64() { }
+
+            public static new bool IsSupported { get => IsSupported; }
+        }
+
+        /// Abs : Absolute value
+        /// <summary>
+        /// svfloat16_t svabs[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svabs[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svabs[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<half> Abs(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// AbsoluteCompareGreaterThan : Absolute compare greater than
+        /// <summary>svbool_t svacgt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> AbsoluteCompareGreaterThan(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// AbsoluteCompareGreaterThanOrEqual : Absolute compare greater than or equal to
+        /// <summary>svbool_t svacge[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> AbsoluteCompareGreaterThanOrEqual(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// AbsoluteCompareLessThan : Absolute compare less than
+        /// <summary>svbool_t svaclt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> AbsoluteCompareLessThan(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// AbsoluteCompareLessThanOrEqual : Absolute compare less than or equal to
+        /// <summary>svbool_t svacle[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> AbsoluteCompareLessThanOrEqual(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// AbsoluteDifference : Absolute difference
+        /// <summary>
+        /// svfloat16_t svabd[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svabd[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svabd[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> AbsoluteDifference(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// Add : Add
+        /// <summary>
+        /// svfloat16_t svadd[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svadd[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svadd[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> Add(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// AddAcross : Add reduction
+        /// <summary>float16_t svaddv[_f16](svbool_t pg, svfloat16_t op)</summary>
+        public static unsafe Vector<half> AddAcross(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// AddPairwise : Add pairwise
+        /// <summary>
+        /// svfloat16_t svaddp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svaddp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> AddPairwise(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// AddRotateComplex : Complex add with rotate
+        /// <summary>
+        /// svfloat16_t svcadd[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, uint64_t imm_rotation)
+        /// svfloat16_t svcadd[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, uint64_t imm_rotation)
+        /// svfloat16_t svcadd[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, uint64_t imm_rotation)
+        /// </summary>
+        public static unsafe Vector<half> AddRotateComplex(Vector<half> left, Vector<half> right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); }
+
+        /// AddSequentialAcross : Add reduction (strictly-ordered)
+        /// <summary>float16_t svadda[_f16](svbool_t pg, float16_t initial, svfloat16_t op)</summary>
+        public static unsafe Vector<half> AddSequentialAcross(Vector<half> initial, Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// CompareEqual : Compare equal to
+        /// <summary>svbool_t svcmpeq[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> CompareEqual(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// CompareGreaterThan : Compare greater than
+        /// <summary>svbool_t svcmpgt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> CompareGreaterThan(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// CompareGreaterThanOrEqual : Compare greater than or equal to
+        /// <summary>svbool_t svcmpge[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> CompareGreaterThanOrEqual(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// CompareLessThan : Compare less than
+        /// <summary>svbool_t svcmplt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> CompareLessThan(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// CompareLessThanOrEqual : Compare less than or equal to
+        /// <summary>svbool_t svcmple[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> CompareLessThanOrEqual(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// CompareNotEqualTo : Compare not equal to
+        /// <summary>svbool_t svcmpne[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> CompareNotEqualTo(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// CompareUnordered : Compare unordered with
+        /// <summary>svbool_t svcmpuo[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> CompareUnordered(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// ConcatenateEvenInt128FromTwoInputs : Concatenate even quadwords from two inputs
+        /// <summary>svfloat16_t svuzp1q[_f16](svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> ConcatenateEvenInt128FromTwoInputs(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// ConcatenateOddInt128FromTwoInputs : Concatenate odd quadwords from two inputs
+        /// <summary>svfloat16_t svuzp2q[_f16](svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> ConcatenateOddInt128FromTwoInputs(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// ConditionalExtractAfterLastActiveElement : Conditionally extract element after last
+        /// <summary>svfloat16_t svclasta[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data)</summary>
+        public static unsafe Vector<half> ConditionalExtractAfterLastActiveElement(Vector<half> mask, Vector<half> defaultValue, Vector<half> data) { throw new PlatformNotSupportedException(); }
+        /// <summary>
+        /// svfloat16_t svclasta[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data)
+        /// float16_t svclasta[_n_f16](svbool_t pg, float16_t fallback, svfloat16_t data)
+        /// </summary>
+        public static unsafe half ConditionalExtractAfterLastActiveElement(Vector<half> mask, half defaultValues, Vector<half> data) { throw new PlatformNotSupportedException(); }
+
+        /// ConditionalExtractAfterLastActiveElementAndReplicate : Conditionally extract element after last
+        /// <summary>svfloat16_t svclasta[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data)</summary>
+        public static unsafe Vector<half> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<half> mask, Vector<half> defaultScalar, Vector<half> data) { throw new PlatformNotSupportedException(); }
+
+        /// ConditionalExtractLastActiveElement : Conditionally extract last element
+        /// <summary>svfloat16_t svclastb[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data)</summary>
+        public static unsafe Vector<half> ConditionalExtractLastActiveElement(Vector<half> mask, Vector<half> defaultValue, Vector<half> data) { throw new PlatformNotSupportedException(); }
+        /// <summary>
+        /// svfloat16_t svclastb[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data)
+        /// float16_t svclastb[_n_f16](svbool_t pg, float16_t fallback, svfloat16_t data)
+        /// </summary>
+        public static unsafe half ConditionalExtractLastActiveElement(Vector<half> mask, half defaultValues, Vector<half> data) { throw new PlatformNotSupportedException(); }
+
+        /// ConditionalExtractLastActiveElementAndReplicate : Conditionally extract last element
+        /// <summary>svfloat16_t svclastb[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data)</summary>
+        public static unsafe Vector<half> ConditionalExtractLastActiveElementAndReplicate(Vector<half> mask, Vector<half> fallback, Vector<half> data) { throw new PlatformNotSupportedException(); }
+
+        /// ConditionalSelect : Conditionally select elements
+        /// <summary>svfloat16_t svsel[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> ConditionalSelect(Vector<half> mask, Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// ConvertToDouble : Floating-point convert
+        /// <summary>
+        /// svfloat64_t svcvt_f64[_f16]_m(svfloat64_t inactive, svbool_t pg, svfloat16_t op)
+        /// svfloat64_t svcvt_f64[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svfloat64_t svcvt_f64[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<double> ConvertToDouble(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ConvertToHalf : Floating-point convert
+        /// <summary>
+        /// svfloat16_t svcvt_f16[_s16]_m(svfloat16_t inactive, svbool_t pg, svint16_t op)
+        /// svfloat16_t svcvt_f16[_s16]_x(svbool_t pg, svint16_t op)
+        /// svfloat16_t svcvt_f16[_s16]_z(svbool_t pg, svint16_t op)
+        /// </summary>
+        public static unsafe Vector<half> ConvertToHalf(Vector<short> value) { throw new PlatformNotSupportedException(); }
+        /// <summary>
+        /// svfloat16_t svcvt_f16[_s32]_m(svfloat16_t inactive, svbool_t pg, svint32_t op)
+        /// svfloat16_t svcvt_f16[_s32]_x(svbool_t pg, svint32_t op)
+        /// svfloat16_t svcvt_f16[_s32]_z(svbool_t pg, svint32_t op)
+        /// </summary>
+        public static unsafe Vector<half> ConvertToHalf(Vector<int> value) { throw new PlatformNotSupportedException(); }
+        /// <summary>
+        /// svfloat16_t svcvt_f16[_s64]_m(svfloat16_t inactive, svbool_t pg, svint64_t op)
+        /// svfloat16_t svcvt_f16[_s64]_x(svbool_t pg, svint64_t op)
+        /// svfloat16_t svcvt_f16[_s64]_z(svbool_t pg, svint64_t op)
+        /// </summary>
+        public static unsafe Vector<half> ConvertToHalf(Vector<long> value) { throw new PlatformNotSupportedException(); }
+        /// <summary>
+        /// svfloat16_t svcvt_f16[_u16]_m(svfloat16_t inactive, svbool_t pg, svuint16_t op)
+        /// svfloat16_t svcvt_f16[_u16]_x(svbool_t pg, svuint16_t op)
+        /// svfloat16_t svcvt_f16[_u16]_z(svbool_t pg, svuint16_t op)
+        /// </summary>
+        public static unsafe Vector<half> ConvertToHalf(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+        /// <summary>
+        /// svfloat16_t svcvt_f16[_u32]_m(svfloat16_t inactive, svbool_t pg, svuint32_t op)
+        /// svfloat16_t svcvt_f16[_u32]_x(svbool_t pg, svuint32_t op)
+        /// svfloat16_t svcvt_f16[_u32]_z(svbool_t pg, svuint32_t op)
+        /// </summary>
+        public static unsafe Vector<half> ConvertToHalf(Vector<uint> value) { throw new PlatformNotSupportedException(); }
+        /// <summary>
+        /// svfloat16_t svcvt_f16[_u64]_m(svfloat16_t inactive, svbool_t pg, svuint64_t op)
+        /// svfloat16_t svcvt_f16[_u64]_x(svbool_t pg, svuint64_t op)
+        /// svfloat16_t svcvt_f16[_u64]_z(svbool_t pg, svuint64_t op)
+        /// </summary>
+        public static unsafe Vector<half> ConvertToHalf(Vector<ulong> value) { throw new PlatformNotSupportedException(); }
+        /// <summary>
+        /// svfloat16_t svcvt_f16[_f32]_m(svfloat16_t inactive, svbool_t pg, svfloat32_t op)
+        /// svfloat16_t svcvt_f16[_f32]_x(svbool_t pg, svfloat32_t op)
+        /// svfloat16_t svcvt_f16[_f32]_z(svbool_t pg, svfloat32_t op)
+        /// </summary>
+        public static unsafe Vector<half> ConvertToHalf(Vector<float> value) { throw new PlatformNotSupportedException(); }
+        /// <summary>
+        /// svfloat16_t svcvt_f16[_f64]_m(svfloat16_t inactive, svbool_t pg, svfloat64_t op)
+        /// svfloat16_t svcvt_f16[_f64]_x(svbool_t pg, svfloat64_t op)
+        /// svfloat16_t svcvt_f16[_f64]_z(svbool_t pg, svfloat64_t op)
+        /// </summary>
+        public static unsafe Vector<half> ConvertToHalf(Vector<double> value) { throw new PlatformNotSupportedException(); }
+
+        /// ConvertToInt16 : Floating-point convert
+        /// <summary>
+        /// svint16_t svcvt_s16[_f16]_m(svint16_t inactive, svbool_t pg, svfloat16_t op)
+        /// svint16_t svcvt_s16[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svint16_t svcvt_s16[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<short> ConvertToInt16(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ConvertToInt32 : Floating-point convert
+        /// <summary>
+        /// svint32_t svcvt_s32[_f16]_m(svint32_t inactive, svbool_t pg, svfloat16_t op)
+        /// svint32_t svcvt_s32[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svint32_t svcvt_s32[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<int> ConvertToInt32(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ConvertToInt64 : Floating-point convert
+        /// <summary>
+        /// svint64_t svcvt_s64[_f16]_m(svint64_t inactive, svbool_t pg, svfloat16_t op)
+        /// svint64_t svcvt_s64[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svint64_t svcvt_s64[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<long> ConvertToInt64(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ConvertToSingle : Floating-point convert
+        /// <summary>
+        /// svfloat32_t svcvt_f32[_f16]_m(svfloat32_t inactive, svbool_t pg, svfloat16_t op)
+        /// svfloat32_t svcvt_f32[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svfloat32_t svcvt_f32[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<float> ConvertToSingle(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ConvertToUInt16 : Floating-point convert
+        /// <summary>
+        /// svuint16_t svcvt_u16[_f16]_m(svuint16_t inactive, svbool_t pg, svfloat16_t op)
+        /// svuint16_t svcvt_u16[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svuint16_t svcvt_u16[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<ushort> ConvertToUInt16(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ConvertToUInt32 : Floating-point convert
+        /// <summary>
+        /// svuint32_t svcvt_u32[_f16]_m(svuint32_t inactive, svbool_t pg, svfloat16_t op)
+        /// svuint32_t svcvt_u32[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svuint32_t svcvt_u32[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<uint> ConvertToUInt32(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ConvertToUInt64 : Floating-point convert
+        /// <summary>
+        /// svuint64_t svcvt_u64[_f16]_m(svuint64_t inactive, svbool_t pg, svfloat16_t op)
+        /// svuint64_t svcvt_u64[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svuint64_t svcvt_u64[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<ulong> ConvertToUInt64(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// CreateFalseMaskHalf : Set all predicate elements to false
+        /// <summary>svbool_t svpfalse[_b]()</summary>
+        public static unsafe Vector<half> CreateFalseMaskHalf() { throw new PlatformNotSupportedException(); }
+
+        /// CreateTrueMaskHalf : Set predicate elements to true
+        /// <summary>svbool_t svptrue_pat_b8(enum svpattern pattern)</summary>
+        public static unsafe Vector<half> CreateTrueMaskHalf([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw new PlatformNotSupportedException(); }
+
+        /// CreateWhileReadAfterWriteMask : While free of read-after-write conflicts
+        /// <summary>svbool_t svwhilerw[_f16](const float16_t *op1, const float16_t *op2)</summary>
+        public static unsafe Vector<half> CreateWhileReadAfterWriteMask(half* left, half* right) { throw new PlatformNotSupportedException(); }
+
+        /// CreateWhileWriteAfterReadMask : While free of write-after-read conflicts
+        /// <summary>svbool_t svwhilewr[_f16](const float16_t *op1, const float16_t *op2)</summary>
+        public static unsafe Vector<half> CreateWhileWriteAfterReadMask(half* left, half* right) { throw new PlatformNotSupportedException(); }
+
+        /// Divide : Divide
+        /// <summary>
+        /// svfloat16_t svdiv[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svdiv[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svdiv[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> Divide(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// DownConvertNarrowingUpper : Down convert and narrow (top)
+        /// <summary>
+        /// svfloat16_t svcvtnt_f16[_f32]_m(svfloat16_t even, svbool_t pg, svfloat32_t op)
+        /// svfloat16_t svcvtnt_f16[_f32]_x(svfloat16_t even, svbool_t pg, svfloat32_t op)
+        /// </summary>
+        public static unsafe Vector<half> DownConvertNarrowingUpper(Vector<float> value) { throw new PlatformNotSupportedException(); }
+
+        /// DuplicateSelectedScalarToVector : Broadcast a scalar value
+        /// <summary>
+        /// svfloat16_t svdup_lane[_f16](svfloat16_t data, uint16_t index)
+        /// svfloat16_t svdupq_lane[_f16](svfloat16_t data, uint64_t index)
+        /// </summary>
+        public static unsafe Vector<half> DuplicateSelectedScalarToVector(Vector<half> data, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); }
+
+        /// ExtractAfterLastScalar : Extract element after last
+        /// <summary>float16_t svlasta[_f16](svbool_t pg, svfloat16_t op)</summary>
+        public static unsafe half ExtractAfterLastScalar(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ExtractAfterLastVector : Extract element after last
+        /// <summary>float16_t svlasta[_f16](svbool_t pg, svfloat16_t op)</summary>
+        public static unsafe Vector<half> ExtractAfterLastVector(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ExtractLastScalar : Extract last element
+        /// <summary>float16_t svlastb[_f16](svbool_t pg, svfloat16_t op)</summary>
+        public static unsafe half ExtractLastScalar(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ExtractLastVector : Extract last element
+        /// <summary>float16_t svlastb[_f16](svbool_t pg, svfloat16_t op)</summary>
+        public static unsafe Vector<half> ExtractLastVector(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ExtractVector : Extract vector from pair of vectors
+        /// <summary>svfloat16_t svext[_f16](svfloat16_t op1, svfloat16_t op2, uint64_t imm3)</summary>
+        public static unsafe Vector<half> ExtractVector(Vector<half> upper, Vector<half> lower, [ConstantExpected] byte index) { throw new PlatformNotSupportedException(); }
+
+        /// FloatingPointExponentialAccelerator : Floating-point exponential accelerator
+        /// <summary>svfloat16_t svexpa[_f16](svuint16_t op)</summary>
+        public static unsafe Vector<half> FloatingPointExponentialAccelerator(Vector<ushort> value) { throw new PlatformNotSupportedException(); }
+
+        /// FusedMultiplyAdd : Multiply-add, addend first
+        /// <summary>
+        /// svfloat16_t svmla[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
+        /// svfloat16_t svmla[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
+        /// svfloat16_t svmla[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
+        /// </summary>
+        public static unsafe Vector<half> FusedMultiplyAdd(Vector<half> addend, Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// FusedMultiplyAddBySelectedScalar : Multiply-add, addend first
+        /// <summary>svfloat16_t svmla_lane[_f16](svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index)</summary>
+        public static unsafe Vector<half> FusedMultiplyAddBySelectedScalar(Vector<half> addend, Vector<half> left, Vector<half> right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); }
+
+        /// FusedMultiplyAddNegated : Negated multiply-add, addend first
+        /// <summary>
+        /// svfloat16_t svnmla[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
+        /// svfloat16_t svnmla[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
+        /// svfloat16_t svnmla[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
+        /// </summary>
+        public static unsafe Vector<half> FusedMultiplyAddNegated(Vector<half> addend, Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// FusedMultiplySubtract : Multiply-subtract, minuend first
+        /// <summary>
+        /// svfloat16_t svmls[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
+        /// svfloat16_t svmls[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
+        /// svfloat16_t svmls[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
+        /// </summary>
+        public static unsafe Vector<half> FusedMultiplySubtract(Vector<half> minuend, Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// FusedMultiplySubtractBySelectedScalar : Multiply-subtract, minuend first
+        /// <summary>svfloat16_t svmls_lane[_f16](svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index)</summary>
+        public static unsafe Vector<half> FusedMultiplySubtractBySelectedScalar(Vector<half> minuend, Vector<half> left, Vector<half> right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); }
+
+        /// FusedMultiplySubtractNegated : Negated multiply-subtract, minuend first
+        /// <summary>
+        /// svfloat16_t svnmls[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
+        /// svfloat16_t svnmls[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
+        /// svfloat16_t svnmls[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3)
+        /// </summary>
+        public static unsafe Vector<half> FusedMultiplySubtractNegated(Vector<half> minuend, Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// GetActiveElementCount : Count set predicate bits
+        /// <summary>uint64_t svcntp_b8(svbool_t pg, svbool_t op)</summary>
+        public static unsafe ulong GetActiveElementCount(Vector<half> mask, Vector<half> from) { throw new PlatformNotSupportedException(); }
+
+        /// InsertIntoShiftedVector : Insert scalar into shifted vector
+        /// <summary>svfloat16_t svinsr[_n_f16](svfloat16_t op1, float16_t op2)</summary>
+        public static unsafe Vector<half> InsertIntoShiftedVector(Vector<half> left, half right) { throw new PlatformNotSupportedException(); }
+
+        /// InterleaveEvenInt128FromTwoInputs : Interleave even quadwords from two inputs
+        /// <summary>svfloat16_t svtrn1q[_f16](svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> InterleaveEvenInt128FromTwoInputs(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// InterleaveInt128FromHighHalvesOfTwoInputs : Interleave quadwords from high halves of two inputs
+        /// <summary>svfloat16_t svzip2q[_f16](svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> InterleaveInt128FromHighHalvesOfTwoInputs(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// InterleaveInt128FromLowHalvesOfTwoInputs : Interleave quadwords from low halves of two inputs
+        /// <summary>svfloat16_t svzip1q[_f16](svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> InterleaveInt128FromLowHalvesOfTwoInputs(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// InterleaveOddInt128FromTwoInputs : Interleave odd quadwords from two inputs
+        /// <summary>svfloat16_t svtrn2q[_f16](svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> InterleaveOddInt128FromTwoInputs(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// LoadVector : Unextended load
+        /// <summary>svfloat16_t svld1[_f16](svbool_t pg, const float16_t *base)</summary>
+        public static unsafe Vector<half> LoadVector(Vector<half> mask, half* address) { throw new PlatformNotSupportedException(); }
+
+        /// LoadVector128AndReplicateToVector : Load and replicate 128 bits of data
+        /// <summary>svfloat16_t svld1rq[_f16](svbool_t pg, const float16_t *base)</summary>
+        public static unsafe Vector<half> LoadVector128AndReplicateToVector(Vector<half> mask, half* address) { throw new PlatformNotSupportedException(); }
+
+        /// LoadVector256AndReplicateToVector : Load and replicate 256 bits of data
+        /// <summary>svfloat16_t svld1ro[_f16](svbool_t pg, const float16_t *base)</summary>
+        public static unsafe Vector<half> LoadVector256AndReplicateToVector(Vector<half> mask, half* address) { throw new PlatformNotSupportedException(); }
+
+        /// LoadVectorFirstFaulting : Unextended load, first-faulting
+        /// <summary>svfloat16_t svldff1[_f16](svbool_t pg, const float16_t *base)</summary>
+        public static unsafe Vector<half> LoadVectorFirstFaulting(Vector<half> mask, half* address) { throw new PlatformNotSupportedException(); }
+
+        /// LoadVectorNonFaulting : Unextended load, non-faulting
+        /// <summary>svfloat16_t svldnf1[_f16](svbool_t pg, const float16_t *base)</summary>
+        public static unsafe Vector<half> LoadVectorNonFaulting(half* address) { throw new PlatformNotSupportedException(); }
+
+        /// LoadVectorNonTemporal : Unextended load, non-temporal
+        /// <summary>svfloat16_t svldnt1[_f16](svbool_t pg, const float16_t *base)</summary>
+        public static unsafe Vector<half> LoadVectorNonTemporal(Vector<half> mask, half* address) { throw new PlatformNotSupportedException(); }
+
+        /// LoadVectorx2 : Load two-element tuples into two vectors
+        /// <summary>svfloat16x2_t svld2[_f16](svbool_t pg, const float16_t *base)</summary>
+        public static unsafe (Vector<half>, Vector<half>) LoadVectorx2(Vector<half> mask, half* address) { throw new PlatformNotSupportedException(); }
+
+        /// LoadVectorx3 : Load three-element tuples into three vectors
+        /// <summary>svfloat16x3_t svld3[_f16](svbool_t pg, const float16_t *base)</summary>
+        public static unsafe (Vector<half>, Vector<half>, Vector<half>) LoadVectorx3(Vector<half> mask, half* address) { throw new PlatformNotSupportedException(); }
+
+        /// LoadVectorx4 : Load four-element tuples into four vectors
+        /// <summary>svfloat16x4_t svld4[_f16](svbool_t pg, const float16_t *base)</summary>
+        public static unsafe (Vector<half>, Vector<half>, Vector<half>, Vector<half>) LoadVectorx4(Vector<half> mask, half* address) { throw new PlatformNotSupportedException(); }
+
+        /// Log2 : Base 2 logarithm as integer
+        /// <summary>
+        /// svint16_t svlogb[_f16]_m(svint16_t inactive, svbool_t pg, svfloat16_t op)
+        /// svint16_t svlogb[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svint16_t svlogb[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<short> Log2(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// Max : Maximum
+        /// <summary>
+        /// svfloat16_t svmax[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svmax[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svmax[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> Max(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// MaxAcross : Maximum reduction to scalar
+        /// <summary>float16_t svmaxv[_f16](svbool_t pg, svfloat16_t op)</summary>
+        public static unsafe Vector<half> MaxAcross(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// MaxNumber : Maximum number
+        /// <summary>
+        /// svfloat16_t svmaxnm[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svmaxnm[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svmaxnm[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> MaxNumber(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// MaxNumberAcross : Maximum number reduction to scalar
+        /// <summary>float16_t svmaxnmv[_f16](svbool_t pg, svfloat16_t op)</summary>
+        public static unsafe Vector<half> MaxNumberAcross(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// MaxNumberPairwise : Maximum number pairwise
+        /// <summary>
+        /// svfloat16_t svmaxnmp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svmaxnmp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> MaxNumberPairwise(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// MaxPairwise : Maximum pairwise
+        /// <summary>
+        /// svfloat16_t svmaxp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svmaxp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> MaxPairwise(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// Min : Minimum
+        /// <summary>
+        /// svfloat16_t svmin[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svmin[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svmin[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> Min(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// MinAcross : Minimum reduction to scalar
+        /// <summary>float16_t svminv[_f16](svbool_t pg, svfloat16_t op)</summary>
+        public static unsafe Vector<half> MinAcross(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// MinNumber : Minimum number
+        /// <summary>
+        /// svfloat16_t svminnm[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svminnm[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svminnm[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> MinNumber(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// MinNumberAcross : Minimum number reduction to scalar
+        /// <summary>float16_t svminnmv[_f16](svbool_t pg, svfloat16_t op)</summary>
+        public static unsafe Vector<half> MinNumberAcross(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// MinNumberPairwise : Minimum number pairwise
+        /// <summary>
+        /// svfloat16_t svminnmp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svminnmp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> MinNumberPairwise(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// MinPairwise : Minimum pairwise
+        /// <summary>
+        /// svfloat16_t svminp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svminp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> MinPairwise(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// Multiply : Multiply
+        /// <summary>
+        /// svfloat16_t svmul[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svmul[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svmul[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> Multiply(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// MultiplyAddRotateComplex : Complex multiply-add with rotate
+        /// <summary>
+        /// svfloat16_t svcmla[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_rotation)
+        /// svfloat16_t svcmla[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_rotation)
+        /// svfloat16_t svcmla[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_rotation)
+        /// </summary>
+        public static unsafe Vector<half> MultiplyAddRotateComplex(Vector<half> addend, Vector<half> left, Vector<half> right, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); }
+
+        /// MultiplyAddRotateComplexBySelectedScalar : Complex multiply-add with rotate
+        /// <summary>svfloat16_t svcmla_lane[_f16](svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index, uint64_t imm_rotation)</summary>
+        public static unsafe Vector<half> MultiplyAddRotateComplexBySelectedScalar(Vector<half> addend, Vector<half> left, Vector<half> right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) { throw new PlatformNotSupportedException(); }
+
+        /// MultiplyAddWideningLower : Multiply-add long (bottom)
+        /// <summary>svfloat32_t svmlalb[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3)</summary>
+        public static unsafe Vector<float> MultiplyAddWideningLower(Vector<float> op1, Vector<half> op2, Vector<half> op3) { throw new PlatformNotSupportedException(); }
+        /// <summary>svfloat32_t svmlalb_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index)</summary>
+        public static unsafe Vector<float> MultiplyAddWideningLower(Vector<float> op1, Vector<half> op2, Vector<half> op3, ulong imm_index) { throw new PlatformNotSupportedException(); }
+
+        /// MultiplyAddWideningUpper : Multiply-add long (top)
+        /// <summary>svfloat32_t svmlalt[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3)</summary>
+        public static unsafe Vector<float> MultiplyAddWideningUpper(Vector<float> op1, Vector<half> op2, Vector<half> op3) { throw new PlatformNotSupportedException(); }
+        /// <summary>svfloat32_t svmlalt_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index)</summary>
+        public static unsafe Vector<float> MultiplyAddWideningUpper(Vector<float> op1, Vector<half> op2, Vector<half> op3, ulong imm_index) { throw new PlatformNotSupportedException(); }
+
+        /// MultiplyBySelectedScalar : Multiply
+        /// <summary>svfloat16_t svmul_lane[_f16](svfloat16_t op1, svfloat16_t op2, uint64_t imm_index)</summary>
+        public static unsafe Vector<half> MultiplyBySelectedScalar(Vector<half> left, Vector<half> right, [ConstantExpected] byte rightIndex) { throw new PlatformNotSupportedException(); }
+
+        /// MultiplyExtended : Multiply extended (∞×0=2)
+        /// <summary>
+        /// svfloat16_t svmulx[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svmulx[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svmulx[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> MultiplyExtended(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// MultiplySubtractWideningLower : Multiply-subtract long (bottom)
+        /// <summary>svfloat32_t svmlslb[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3)</summary>
+        public static unsafe Vector<float> MultiplySubtractWideningLower(Vector<float> op1, Vector<half> op2, Vector<half> op3) { throw new PlatformNotSupportedException(); }
+        /// <summary>svfloat32_t svmlslb_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index)</summary>
+        public static unsafe Vector<float> MultiplySubtractWideningLower(Vector<float> op1, Vector<half> op2, Vector<half> op3, ulong imm_index) { throw new PlatformNotSupportedException(); }
+
+        /// MultiplySubtractWideningUpper : Multiply-subtract long (top)
+        /// <summary>svfloat32_t svmlslt[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3)</summary>
+        public static unsafe Vector<float> MultiplySubtractWideningUpper(Vector<float> op1, Vector<half> op2, Vector<half> op3) { throw new PlatformNotSupportedException(); }
+        /// <summary>svfloat32_t svmlslt_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index)</summary>
+        public static unsafe Vector<float> MultiplySubtractWideningUpper(Vector<float> op1, Vector<half> op2, Vector<half> op3, ulong imm_index) { throw new PlatformNotSupportedException(); }
+
+        /// Negate : Negate
+        /// <summary>
+        /// svfloat16_t svneg[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svneg[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svneg[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<half> Negate(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// PopCount : Count nonzero bits
+        /// <summary>
+        /// svuint16_t svcnt[_f16]_m(svuint16_t inactive, svbool_t pg, svfloat16_t op)
+        /// svuint16_t svcnt[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svuint16_t svcnt[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<ushort> PopCount(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ReciprocalEstimate : Reciprocal estimate
+        /// <summary>svfloat16_t svrecpe[_f16](svfloat16_t op)</summary>
+        public static unsafe Vector<half> ReciprocalEstimate(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ReciprocalExponent : Reciprocal exponent
+        /// <summary>
+        /// svfloat16_t svrecpx[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svrecpx[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svrecpx[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<half> ReciprocalExponent(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ReciprocalSqrtEstimate : Reciprocal square root estimate
+        /// <summary>svfloat16_t svrsqrte[_f16](svfloat16_t op)</summary>
+        public static unsafe Vector<half> ReciprocalSqrtEstimate(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// ReciprocalSqrtStep : Reciprocal square root step
+        /// <summary>svfloat16_t svrsqrts[_f16](svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> ReciprocalSqrtStep(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// ReciprocalStep : Reciprocal step
+        /// <summary>svfloat16_t svrecps[_f16](svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> ReciprocalStep(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// ReverseElement : Reverse all elements
+        /// <summary>svfloat16_t svrev[_f16](svfloat16_t op)</summary>
+        public static unsafe Vector<half> ReverseElement(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// RoundAwayFromZero : Round to nearest, ties away from zero
+        /// <summary>
+        /// svfloat16_t svrinta[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svrinta[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svrinta[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<half> RoundAwayFromZero(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// RoundToNearest : Round to nearest, ties to even
+        /// <summary>
+        /// svfloat16_t svrintn[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svrintn[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svrintn[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<half> RoundToNearest(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// RoundToNegativeInfinity : Round towards -∞
+        /// <summary>
+        /// svfloat16_t svrintm[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svrintm[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svrintm[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<half> RoundToNegativeInfinity(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// RoundToPositiveInfinity : Round towards +∞
+        /// <summary>
+        /// svfloat16_t svrintp[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svrintp[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svrintp[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<half> RoundToPositiveInfinity(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// RoundToZero : Round towards zero
+        /// <summary>
+        /// svfloat16_t svrintz[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svrintz[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svrintz[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<half> RoundToZero(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// Scale : Adjust exponent
+        /// <summary>
+        /// svfloat16_t svscale[_f16]_m(svbool_t pg, svfloat16_t op1, svint16_t op2)
+        /// svfloat16_t svscale[_f16]_x(svbool_t pg, svfloat16_t op1, svint16_t op2)
+        /// svfloat16_t svscale[_f16]_z(svbool_t pg, svfloat16_t op1, svint16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> Scale(Vector<half> left, Vector<short> right) { throw new PlatformNotSupportedException(); }
+
+        /// Splice : Splice two vectors under predicate control
+        /// <summary>svfloat16_t svsplice[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> Splice(Vector<half> mask, Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// Sqrt : Square root
+        /// <summary>
+        /// svfloat16_t svsqrt[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svsqrt[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// svfloat16_t svsqrt[_f16]_z(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<half> Sqrt(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// Store : Non-truncating store
+        /// <summary>void svst1[_f16](svbool_t pg, float16_t *base, svfloat16_t data)</summary>
+        public static unsafe void Store(Vector<half> mask, half* address, Vector<half> data) { throw new PlatformNotSupportedException(); }
+        /// <summary>void svst2[_f16](svbool_t pg, float16_t *base, svfloat16x2_t data)</summary>
+        public static unsafe void Store(Vector<half> mask, half* address, (Vector<half> Value1, Vector<half> Value2) data) { throw new PlatformNotSupportedException(); }
+        /// <summary>void svst3[_f16](svbool_t pg, float16_t *base, svfloat16x3_t data)</summary>
+        public static unsafe void Store(Vector<half> mask, half* address, (Vector<half> Value1, Vector<half> Value2, Vector<half> Value3) data) { throw new PlatformNotSupportedException(); }
+        /// <summary>void svst4[_f16](svbool_t pg, float16_t *base, svfloat16x4_t data)</summary>
+        public static unsafe void Store(Vector<half> mask, half* address, (Vector<half> Value1, Vector<half> Value2, Vector<half> Value3, Vector<half> Value4) data) { throw new PlatformNotSupportedException(); }
+
+        /// StoreNonTemporal : Non-truncating store, non-temporal
+        /// <summary>void svstnt1[_f16](svbool_t pg, float16_t *base, svfloat16_t data)</summary>
+        public static unsafe void StoreNonTemporal(Vector<half> mask, half* address, Vector<half> data) { throw new PlatformNotSupportedException(); }
+
+        /// Subtract : Subtract
+        /// <summary>
+        /// svfloat16_t svsub[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svsub[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// svfloat16_t svsub[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        /// </summary>
+        public static unsafe Vector<half> Subtract(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// TransposeEven : Interleave even elements from two inputs
+        /// <summary>svfloat16_t svtrn1[_f16](svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> TransposeEven(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// TransposeOdd : Interleave odd elements from two inputs
+        /// <summary>svfloat16_t svtrn2[_f16](svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> TransposeOdd(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// TrigonometricMultiplyAddCoefficient : Trigonometric multiply-add coefficient
+        /// <summary>svfloat16_t svtmad[_f16](svfloat16_t op1, svfloat16_t op2, uint64_t imm3)</summary>
+        public static unsafe Vector<half> TrigonometricMultiplyAddCoefficient(Vector<half> left, Vector<half> right, [ConstantExpected] byte control) { throw new PlatformNotSupportedException(); }
+
+        /// TrigonometricSelectCoefficient : Trigonometric select coefficient
+        /// <summary>svfloat16_t svtssel[_f16](svfloat16_t op1, svuint16_t op2)</summary>
+        public static unsafe Vector<half> TrigonometricSelectCoefficient(Vector<half> value, Vector<ushort> selector) { throw new PlatformNotSupportedException(); }
+
+        /// TrigonometricStartingValue : Trigonometric starting value
+        /// <summary>svfloat16_t svtsmul[_f16](svfloat16_t op1, svuint16_t op2)</summary>
+        public static unsafe Vector<half> TrigonometricStartingValue(Vector<half> value, Vector<ushort> sign) { throw new PlatformNotSupportedException(); }
+
+        /// UnzipEven : Concatenate even elements from two inputs
+        /// <summary>svfloat16_t svuzp1[_f16](svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> UnzipEven(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// UnzipOdd : Concatenate odd elements from two inputs
+        /// <summary>svfloat16_t svuzp2[_f16](svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> UnzipOdd(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// UpConvertWideningUpper : Up convert long (top)
+        /// <summary>
+        /// svfloat32_t svcvtlt_f32[_f16]_m(svfloat32_t inactive, svbool_t pg, svfloat16_t op)
+        /// svfloat32_t svcvtlt_f32[_f16]_x(svbool_t pg, svfloat16_t op)
+        /// </summary>
+        public static unsafe Vector<float> UpConvertWideningUpper(Vector<half> value) { throw new PlatformNotSupportedException(); }
+
+        /// VectorTableLookup : Table lookup in single-vector table
+        /// <summary>svfloat16_t svtbl[_f16](svfloat16_t data, svuint16_t indices)</summary>
+        public static unsafe Vector<half> VectorTableLookup(Vector<half> data, Vector<ushort> indices) { throw new PlatformNotSupportedException(); }
+        /// <summary>svfloat16_t svtbl2[_f16](svfloat16x2_t data, svuint16_t indices)</summary>
+        public static unsafe Vector<half> VectorTableLookup((Vector<half> data1, Vector<half> data2), Vector<ushort> indices) { throw new PlatformNotSupportedException(); }
+
+        /// VectorTableLookupExtension : Table lookup in single-vector table (merging)
+        /// <summary>svfloat16_t svtbx[_f16](svfloat16_t fallback, svfloat16_t data, svuint16_t indices)</summary>
+        public static unsafe Vector<half> VectorTableLookupExtension(Vector<half> fallback, Vector<half> data, Vector<ushort> indices) { throw new PlatformNotSupportedException(); }
+
+        /// ZipHigh : Interleave elements from high halves of two inputs
+        /// <summary>svfloat16_t svzip2[_f16](svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> ZipHigh(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+        /// ZipLow : Interleave elements from low halves of two inputs
+        /// <summary>svfloat16_t svzip1[_f16](svfloat16_t op1, svfloat16_t op2)</summary>
+        public static unsafe Vector<half> ZipLow(Vector<half> left, Vector<half> right) { throw new PlatformNotSupportedException(); }
+
+    }
+}
+
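Before the companion ref-assembly listing, a quick orientation sketch that uses only members shown in the file above: a full-vector FP16 square-root loop built from CreateTrueMaskHalf, LoadVector, Sqrt, and Store. This is illustrative only; it is written against the placeholder `half` spelling these generated files use (so it is not compilable as-is against a shipping runtime), it assumes `count` is a multiple of the vector length, and tail predication is deliberately omitted:

    // Sketch: predicated FP16 copy-with-sqrt over a packed buffer.
    static unsafe void SqrtAll(half* src, half* dst, int count)
    {
        var mask = SveFp16.CreateTrueMaskHalf();   // all lanes active
        int step = Vector<half>.Count;             // assumes count % step == 0
        for (int i = 0; i + step <= count; i += step)
        {
            var v = SveFp16.LoadVector(mask, src + i);
            SveFp16.Store(mask, dst + i, SveFp16.Sqrt(v));
        }
    }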
a/sve_api/out_cs_api/SveFp16.System.Runtime.Intrinsics.cs b/sve_api/out_cs_api/SveFp16.System.Runtime.Intrinsics.cs new file mode 100644 index 0000000000000..d5cb65c4e68cc --- /dev/null +++ b/sve_api/out_cs_api/SveFp16.System.Runtime.Intrinsics.cs @@ -0,0 +1,139 @@ + public static System.Numerics.Vector Abs(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector AbsoluteCompareGreaterThan(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteCompareGreaterThanOrEqual(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteCompareLessThan(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteCompareLessThanOrEqual(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AbsoluteDifference(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Add(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector AddPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector AddRotateComplex(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector AddSequentialAcross(System.Numerics.Vector initial, System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector CompareEqual(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector CompareGreaterThan(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector CompareGreaterThanOrEqual(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector CompareLessThan(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector CompareLessThanOrEqual(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector CompareNotEqualTo(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector CompareUnordered(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConcatenateOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConditionalExtractAfterLastActiveElement(System.Numerics.Vector mask, System.Numerics.Vector defaultValue, System.Numerics.Vector data) { throw null; } + public static half ConditionalExtractAfterLastActiveElement(System.Numerics.Vector mask, half defaultValues, System.Numerics.Vector data) { throw null; } + public static System.Numerics.Vector ConditionalExtractAfterLastActiveElementAndReplicate(System.Numerics.Vector mask, System.Numerics.Vector defaultScalar, System.Numerics.Vector data) { throw null; } + public static System.Numerics.Vector 
ConditionalExtractLastActiveElement(System.Numerics.Vector mask, System.Numerics.Vector defaultValue, System.Numerics.Vector data) { throw null; } + public static half ConditionalExtractLastActiveElement(System.Numerics.Vector mask, half defaultValues, System.Numerics.Vector data) { throw null; } + public static System.Numerics.Vector ConditionalExtractLastActiveElementAndReplicate(System.Numerics.Vector mask, System.Numerics.Vector fallback, System.Numerics.Vector data) { throw null; } + public static System.Numerics.Vector ConditionalSelect(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ConvertToDouble(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToHalf(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToHalf(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToHalf(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToHalf(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToHalf(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToHalf(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToHalf(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToHalf(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToInt16(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToInt32(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToInt64(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToSingle(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToUInt16(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToUInt32(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ConvertToUInt64(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector CreateFalseMaskHalf() { throw null; } + public static System.Numerics.Vector CreateTrueMaskHalf([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileReadAfterWriteMask(half* left, half* right) { throw null; } + public static unsafe System.Numerics.Vector CreateWhileWriteAfterReadMask(half* left, half* right) { throw null; } + public static System.Numerics.Vector Divide(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector DownConvertNarrowingUpper(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(System.Numerics.Vector data, [ConstantExpected] byte index) { throw null; } + public static half ExtractAfterLastScalar(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ExtractAfterLastVector(System.Numerics.Vector value) { throw null; } + public static half ExtractLastScalar(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ExtractLastVector(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ExtractVector(System.Numerics.Vector upper, 
System.Numerics.Vector lower, [ConstantExpected] byte index) { throw null; } + public static System.Numerics.Vector FloatingPointExponentialAccelerator(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector FusedMultiplyAdd(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector FusedMultiplyAddBySelectedScalar(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector FusedMultiplyAddNegated(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector FusedMultiplySubtract(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector FusedMultiplySubtractBySelectedScalar(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector FusedMultiplySubtractNegated(System.Numerics.Vector minuend, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static ulong GetActiveElementCount(System.Numerics.Vector mask, System.Numerics.Vector from) { throw null; } + public static System.Numerics.Vector InsertIntoShiftedVector(System.Numerics.Vector left, half right) { throw null; } + public static System.Numerics.Vector InterleaveEvenInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromHighHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveInt128FromLowHalvesOfTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector InterleaveOddInt128FromTwoInputs(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static unsafe System.Numerics.Vector LoadVector(System.Numerics.Vector mask, half* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector128AndReplicateToVector(System.Numerics.Vector mask, half* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVector256AndReplicateToVector(System.Numerics.Vector mask, half* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorFirstFaulting(System.Numerics.Vector mask, half* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonFaulting(half* address) { throw null; } + public static unsafe System.Numerics.Vector LoadVectorNonTemporal(System.Numerics.Vector mask, half* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector) LoadVectorx2(System.Numerics.Vector mask, half* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx3(System.Numerics.Vector mask, half* address) { throw null; } + public static unsafe (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) LoadVectorx4(System.Numerics.Vector mask, half* address) { throw null; } + public static System.Numerics.Vector Log2(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector 
Max(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MaxNumber(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxNumberAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MaxNumberPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MaxPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Min(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MinNumber(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinNumberAcross(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector MinNumberPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MinPairwise(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Multiply(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplex(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddRotateComplexBySelectedScalar(System.Numerics.Vector addend, System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplyAddWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplyBySelectedScalar(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte rightIndex) { throw null; } + public static System.Numerics.Vector MultiplyExtended(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningLower(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningUpper(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MultiplySubtractWideningUpper(System.Numerics.Vector op1, 
System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector Negate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector PopCount(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReciprocalEstimate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReciprocalExponent(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReciprocalSqrtEstimate(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReciprocalSqrtStep(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ReciprocalStep(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ReverseElement(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundAwayFromZero(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundToNearest(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundToNegativeInfinity(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundToPositiveInfinity(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector RoundToZero(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector Scale(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Splice(System.Numerics.Vector mask, System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Sqrt(System.Numerics.Vector value) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, half* address, System.Numerics.Vector data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, half* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, half* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3) data) { throw null; } + public static unsafe void Store(System.Numerics.Vector mask, half* address, (System.Numerics.Vector Value1, System.Numerics.Vector Value2, System.Numerics.Vector Value3, System.Numerics.Vector Value4) data) { throw null; } + public static unsafe void StoreNonTemporal(System.Numerics.Vector mask, half* address, System.Numerics.Vector data) { throw null; } + public static System.Numerics.Vector Subtract(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector TransposeEven(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector TransposeOdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector TrigonometricMultiplyAddCoefficient(System.Numerics.Vector left, System.Numerics.Vector right, [ConstantExpected] byte control) { throw null; } + public static System.Numerics.Vector TrigonometricSelectCoefficient(System.Numerics.Vector value, System.Numerics.Vector selector) { throw null; } + public static System.Numerics.Vector TrigonometricStartingValue(System.Numerics.Vector value, System.Numerics.Vector sign) { throw null; } + public static 
System.Numerics.Vector UnzipEven(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector UnzipOdd(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector UpConvertWideningUpper(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector VectorTableLookup(System.Numerics.Vector data, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookup((System.Numerics.Vector data1, System.Numerics.Vector data2) table, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector VectorTableLookupExtension(System.Numerics.Vector fallback, System.Numerics.Vector data, System.Numerics.Vector indices) { throw null; } + public static System.Numerics.Vector ZipHigh(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector ZipLow(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + diff --git a/sve_api/out_cs_api/SveFp16.cs b/sve_api/out_cs_api/SveFp16.cs new file mode 100644 index 0000000000000..95e6bb0f057f7 --- /dev/null +++ b/sve_api/out_cs_api/SveFp16.cs @@ -0,0 +1,1195 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveFp16 : AdvSimd + { + internal SveFp16() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// Abs : Absolute value + + /// + /// svfloat16_t svabs[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// svfloat16_t svabs[_f16]_x(svbool_t pg, svfloat16_t op) + /// svfloat16_t svabs[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector Abs(Vector value) => Abs(value); + + + /// AbsoluteCompareGreaterThan : Absolute compare greater than + + /// + /// svbool_t svacgt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector AbsoluteCompareGreaterThan(Vector left, Vector right) => AbsoluteCompareGreaterThan(left, right); + + + /// AbsoluteCompareGreaterThanOrEqual : Absolute compare greater than or equal to + + /// + /// svbool_t svacge[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector AbsoluteCompareGreaterThanOrEqual(Vector left, Vector right) => AbsoluteCompareGreaterThanOrEqual(left, right); + + + /// AbsoluteCompareLessThan : Absolute compare less than + + /// + /// svbool_t svaclt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector AbsoluteCompareLessThan(Vector left, Vector right) => AbsoluteCompareLessThan(left, right); + + + /// AbsoluteCompareLessThanOrEqual : Absolute compare less than or equal to + + /// + /// svbool_t svacle[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector AbsoluteCompareLessThanOrEqual(Vector left, Vector right) => AbsoluteCompareLessThanOrEqual(left, right); + + + /// AbsoluteDifference : Absolute 
difference + + /// + /// svfloat16_t svabd[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svabd[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svabd[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) => AbsoluteDifference(left, right); + + + /// Add : Add + + /// + /// svfloat16_t svadd[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svadd[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svadd[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector Add(Vector left, Vector right) => Add(left, right); + + + /// AddAcross : Add reduction + + /// + /// float16_t svaddv[_f16](svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector AddAcross(Vector value) => AddAcross(value); + + + /// AddPairwise : Add pairwise + + /// + /// svfloat16_t svaddp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svaddp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + + /// AddRotateComplex : Complex add with rotate + + /// + /// svfloat16_t svcadd[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, uint64_t imm_rotation) + /// svfloat16_t svcadd[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, uint64_t imm_rotation) + /// svfloat16_t svcadd[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, uint64_t imm_rotation) + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + + /// AddSequentialAcross : Add reduction (strictly-ordered) + + /// + /// float16_t svadda[_f16](svbool_t pg, float16_t initial, svfloat16_t op) + /// + public static unsafe Vector AddSequentialAcross(Vector initial, Vector value) => AddSequentialAcross(initial, value); + + + /// CompareEqual : Compare equal to + + /// + /// svbool_t svcmpeq[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + + /// CompareGreaterThan : Compare greater than + + /// + /// svbool_t svcmpgt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + + /// CompareGreaterThanOrEqual : Compare greater than or equal to + + /// + /// svbool_t svcmpge[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + + /// CompareLessThan : Compare less than + + /// + /// svbool_t svcmplt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + + /// CompareLessThanOrEqual : Compare less than or equal to + + /// + /// svbool_t svcmple[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + + /// CompareNotEqualTo : Compare not equal to + + /// + /// svbool_t svcmpne[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => 
CompareNotEqualTo(left, right); + + + /// CompareUnordered : Compare unordered with + + /// + /// svbool_t svcmpuo[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector CompareUnordered(Vector left, Vector right) => CompareUnordered(left, right); + + + /// ConcatenateEvenInt128FromTwoInputs : Concatenate even quadwords from two inputs + + /// + /// svfloat16_t svuzp1q[_f16](svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + + /// ConcatenateOddInt128FromTwoInputs : Concatenate odd quadwords from two inputs + + /// + /// svfloat16_t svuzp2q[_f16](svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) => ConcatenateOddInt128FromTwoInputs(left, right); + + + /// ConditionalExtractAfterLastActiveElement : Conditionally extract element after last + + /// + /// svfloat16_t svclasta[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data); + + /// + /// svfloat16_t svclasta[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) + /// float16_t svclasta[_n_f16](svbool_t pg, float16_t fallback, svfloat16_t data) + /// + public static unsafe half ConditionalExtractAfterLastActiveElement(Vector mask, half defaultValues, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data); + + + /// ConditionalExtractAfterLastActiveElementAndReplicate : Conditionally extract element after last + + /// + /// svfloat16_t svclasta[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, Vector defaultScalar, Vector data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data); + + + /// ConditionalExtractLastActiveElement : Conditionally extract last element + + /// + /// svfloat16_t svclastb[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) + /// + public static unsafe Vector ConditionalExtractLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractLastActiveElement(mask, defaultValue, data); + + /// + /// svfloat16_t svclastb[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) + /// float16_t svclastb[_n_f16](svbool_t pg, float16_t fallback, svfloat16_t data) + /// + public static unsafe half ConditionalExtractLastActiveElement(Vector mask, half defaultValues, Vector data) => ConditionalExtractLastActiveElement(mask, defaultValues, data); + + + /// ConditionalExtractLastActiveElementAndReplicate : Conditionally extract last element + + /// + /// svfloat16_t svclastb[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) + /// + public static unsafe Vector ConditionalExtractLastActiveElementAndReplicate(Vector mask, Vector fallback, Vector data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data); + + + /// ConditionalSelect : Conditionally select elements + + /// + /// svfloat16_t svsel[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right) => ConditionalSelect(mask, left, right); + + + /// ConvertToDouble : Floating-point convert + + /// + /// svfloat64_t 
svcvt_f64[_f16]_m(svfloat64_t inactive, svbool_t pg, svfloat16_t op) + /// svfloat64_t svcvt_f64[_f16]_x(svbool_t pg, svfloat16_t op) + /// svfloat64_t svcvt_f64[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector ConvertToDouble(Vector value) => ConvertToDouble(value); + + + /// ConvertToHalf : Floating-point convert + + /// + /// svfloat16_t svcvt_f16[_s16]_m(svfloat16_t inactive, svbool_t pg, svint16_t op) + /// svfloat16_t svcvt_f16[_s16]_x(svbool_t pg, svint16_t op) + /// svfloat16_t svcvt_f16[_s16]_z(svbool_t pg, svint16_t op) + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + /// + /// svfloat16_t svcvt_f16[_s32]_m(svfloat16_t inactive, svbool_t pg, svint32_t op) + /// svfloat16_t svcvt_f16[_s32]_x(svbool_t pg, svint32_t op) + /// svfloat16_t svcvt_f16[_s32]_z(svbool_t pg, svint32_t op) + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + /// + /// svfloat16_t svcvt_f16[_s64]_m(svfloat16_t inactive, svbool_t pg, svint64_t op) + /// svfloat16_t svcvt_f16[_s64]_x(svbool_t pg, svint64_t op) + /// svfloat16_t svcvt_f16[_s64]_z(svbool_t pg, svint64_t op) + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + /// + /// svfloat16_t svcvt_f16[_u16]_m(svfloat16_t inactive, svbool_t pg, svuint16_t op) + /// svfloat16_t svcvt_f16[_u16]_x(svbool_t pg, svuint16_t op) + /// svfloat16_t svcvt_f16[_u16]_z(svbool_t pg, svuint16_t op) + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + /// + /// svfloat16_t svcvt_f16[_u32]_m(svfloat16_t inactive, svbool_t pg, svuint32_t op) + /// svfloat16_t svcvt_f16[_u32]_x(svbool_t pg, svuint32_t op) + /// svfloat16_t svcvt_f16[_u32]_z(svbool_t pg, svuint32_t op) + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + /// + /// svfloat16_t svcvt_f16[_u64]_m(svfloat16_t inactive, svbool_t pg, svuint64_t op) + /// svfloat16_t svcvt_f16[_u64]_x(svbool_t pg, svuint64_t op) + /// svfloat16_t svcvt_f16[_u64]_z(svbool_t pg, svuint64_t op) + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + /// + /// svfloat16_t svcvt_f16[_f32]_m(svfloat16_t inactive, svbool_t pg, svfloat32_t op) + /// svfloat16_t svcvt_f16[_f32]_x(svbool_t pg, svfloat32_t op) + /// svfloat16_t svcvt_f16[_f32]_z(svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + /// + /// svfloat16_t svcvt_f16[_f64]_m(svfloat16_t inactive, svbool_t pg, svfloat64_t op) + /// svfloat16_t svcvt_f16[_f64]_x(svbool_t pg, svfloat64_t op) + /// svfloat16_t svcvt_f16[_f64]_z(svbool_t pg, svfloat64_t op) + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + + /// ConvertToInt16 : Floating-point convert + + /// + /// svint16_t svcvt_s16[_f16]_m(svint16_t inactive, svbool_t pg, svfloat16_t op) + /// svint16_t svcvt_s16[_f16]_x(svbool_t pg, svfloat16_t op) + /// svint16_t svcvt_s16[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector ConvertToInt16(Vector value) => ConvertToInt16(value); + + + /// ConvertToInt32 : Floating-point convert + + /// + /// svint32_t svcvt_s32[_f16]_m(svint32_t inactive, svbool_t pg, svfloat16_t op) + /// svint32_t svcvt_s32[_f16]_x(svbool_t pg, svfloat16_t op) + /// svint32_t svcvt_s32[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector ConvertToInt32(Vector value) => ConvertToInt32(value); + + + /// 
ConvertToInt64 : Floating-point convert + + /// + /// svint64_t svcvt_s64[_f16]_m(svint64_t inactive, svbool_t pg, svfloat16_t op) + /// svint64_t svcvt_s64[_f16]_x(svbool_t pg, svfloat16_t op) + /// svint64_t svcvt_s64[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector ConvertToInt64(Vector value) => ConvertToInt64(value); + + + /// ConvertToSingle : Floating-point convert + + /// + /// svfloat32_t svcvt_f32[_f16]_m(svfloat32_t inactive, svbool_t pg, svfloat16_t op) + /// svfloat32_t svcvt_f32[_f16]_x(svbool_t pg, svfloat16_t op) + /// svfloat32_t svcvt_f32[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector ConvertToSingle(Vector value) => ConvertToSingle(value); + + + /// ConvertToUInt16 : Floating-point convert + + /// + /// svuint16_t svcvt_u16[_f16]_m(svuint16_t inactive, svbool_t pg, svfloat16_t op) + /// svuint16_t svcvt_u16[_f16]_x(svbool_t pg, svfloat16_t op) + /// svuint16_t svcvt_u16[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector ConvertToUInt16(Vector value) => ConvertToUInt16(value); + + + /// ConvertToUInt32 : Floating-point convert + + /// + /// svuint32_t svcvt_u32[_f16]_m(svuint32_t inactive, svbool_t pg, svfloat16_t op) + /// svuint32_t svcvt_u32[_f16]_x(svbool_t pg, svfloat16_t op) + /// svuint32_t svcvt_u32[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector ConvertToUInt32(Vector value) => ConvertToUInt32(value); + + + /// ConvertToUInt64 : Floating-point convert + + /// + /// svuint64_t svcvt_u64[_f16]_m(svuint64_t inactive, svbool_t pg, svfloat16_t op) + /// svuint64_t svcvt_u64[_f16]_x(svbool_t pg, svfloat16_t op) + /// svuint64_t svcvt_u64[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector ConvertToUInt64(Vector value) => ConvertToUInt64(value); + + + + /// CreateFalseMaskHalf : Set all predicate elements to false + + /// + /// svbool_t svpfalse[_b]() + /// + public static unsafe Vector CreateFalseMaskHalf() => CreateFalseMaskHalf(); + + + /// CreateTrueMaskHalf : Set predicate elements to true + + /// + /// svbool_t svptrue_pat_b8(enum svpattern pattern) + /// + public static unsafe Vector CreateTrueMaskHalf([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskHalf(pattern); + + + /// CreateWhileReadAfterWriteMask : While free of read-after-write conflicts + + /// + /// svbool_t svwhilerw[_f16](const float16_t *op1, const float16_t *op2) + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(half* left, half* right) => CreateWhileReadAfterWriteMask(left, right); + + + /// CreateWhileWriteAfterReadMask : While free of write-after-read conflicts + + /// + /// svbool_t svwhilewr[_f16](const float16_t *op1, const float16_t *op2) + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(half* left, half* right) => CreateWhileWriteAfterReadMask(left, right); + + + /// Divide : Divide + + /// + /// svfloat16_t svdiv[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svdiv[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svdiv[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector Divide(Vector left, Vector right) => Divide(left, right); + + + + /// DownConvertNarrowingUpper : Down convert and narrow (top) + + /// + /// svfloat16_t svcvtnt_f16[_f32]_m(svfloat16_t even, svbool_t pg, svfloat32_t op) + /// svfloat16_t svcvtnt_f16[_f32]_x(svfloat16_t even, svbool_t pg, svfloat32_t op) + /// + public static unsafe Vector 
DownConvertNarrowingUpper(Vector value) => DownConvertNarrowingUpper(value); + + + /// DuplicateSelectedScalarToVector : Broadcast a scalar value + + /// + /// svfloat16_t svdup_lane[_f16](svfloat16_t data, uint16_t index) + /// svfloat16_t svdupq_lane[_f16](svfloat16_t data, uint64_t index) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + + /// ExtractAfterLastScalar : Extract element after last + + /// + /// float16_t svlasta[_f16](svbool_t pg, svfloat16_t op) + /// + public static unsafe half ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + + /// ExtractAfterLastVector : Extract element after last + + /// + /// float16_t svlasta[_f16](svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + + /// ExtractLastScalar : Extract last element + + /// + /// float16_t svlastb[_f16](svbool_t pg, svfloat16_t op) + /// + public static unsafe half ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + + /// ExtractLastVector : Extract last element + + /// + /// float16_t svlastb[_f16](svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + + /// ExtractVector : Extract vector from pair of vectors + + /// + /// svfloat16_t svext[_f16](svfloat16_t op1, svfloat16_t op2, uint64_t imm3) + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index); + + + /// FloatingPointExponentialAccelerator : Floating-point exponential accelerator + + /// + /// svfloat16_t svexpa[_f16](svuint16_t op) + /// + public static unsafe Vector FloatingPointExponentialAccelerator(Vector value) => FloatingPointExponentialAccelerator(value); + + + /// FusedMultiplyAdd : Multiply-add, addend first + + /// + /// svfloat16_t svmla[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// svfloat16_t svmla[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// svfloat16_t svmla[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right) => FusedMultiplyAdd(addend, left, right); + + + /// FusedMultiplyAddBySelectedScalar : Multiply-add, addend first + + /// + /// svfloat16_t svmla_lane[_f16](svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) + /// + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + + /// FusedMultiplyAddNegated : Negated multiply-add, addend first + + /// + /// svfloat16_t svnmla[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// svfloat16_t svnmla[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// svfloat16_t svnmla[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right) => FusedMultiplyAddNegated(addend, left, right); + + + /// FusedMultiplySubtract : Multiply-subtract, minuend first + + /// + /// svfloat16_t svmls[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// svfloat16_t svmls[_f16]_x(svbool_t pg, 
svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// svfloat16_t svmls[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right) => FusedMultiplySubtract(minuend, left, right); + + + /// FusedMultiplySubtractBySelectedScalar : Multiply-subtract, minuend first + + /// + /// svfloat16_t svmls_lane[_f16](svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) + /// + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); + + + /// FusedMultiplySubtractNegated : Negated multiply-subtract, minuend first + + /// + /// svfloat16_t svnmls[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// svfloat16_t svnmls[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// svfloat16_t svnmls[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right) => FusedMultiplySubtractNegated(minuend, left, right); + + + /// GetActiveElementCount : Count set predicate bits + + /// + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) + /// + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) => GetActiveElementCount(mask, from); + + + /// InsertIntoShiftedVector : Insert scalar into shifted vector + + /// + /// svfloat16_t svinsr[_n_f16](svfloat16_t op1, float16_t op2) + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, half right) => InsertIntoShiftedVector(left, right); + + + /// InterleaveEvenInt128FromTwoInputs : Interleave even quadwords from two inputs + + /// + /// svfloat16_t svtrn1q[_f16](svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) => InterleaveEvenInt128FromTwoInputs(left, right); + + + /// InterleaveInt128FromHighHalvesOfTwoInputs : Interleave quadwords from high halves of two inputs + + /// + /// svfloat16_t svzip2q[_f16](svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right); + + + /// InterleaveInt128FromLowHalvesOfTwoInputs : Interleave quadwords from low halves of two inputs + + /// + /// svfloat16_t svzip1q[_f16](svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right); + + + /// InterleaveOddInt128FromTwoInputs : Interleave odd quadwords from two inputs + + /// + /// svfloat16_t svtrn2q[_f16](svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) => InterleaveOddInt128FromTwoInputs(left, right); + + + /// LoadVector : Unextended load + + /// + /// svfloat16_t svld1[_f16](svbool_t pg, const float16_t *base) + /// + public static unsafe Vector LoadVector(Vector mask, half* address) => LoadVector(mask, address); + + + /// LoadVector128AndReplicateToVector : Load and replicate 128 bits of data + + /// + /// svfloat16_t svld1rq[_f16](svbool_t pg, const float16_t *base) + /// + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, half* address) => 
LoadVector128AndReplicateToVector(mask, address); + + + /// LoadVector256AndReplicateToVector : Load and replicate 256 bits of data + + /// + /// svfloat16_t svld1ro[_f16](svbool_t pg, const float16_t *base) + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, half* address) => LoadVector256AndReplicateToVector(mask, address); + + + /// LoadVectorFirstFaulting : Unextended load, first-faulting + + /// + /// svfloat16_t svldff1[_f16](svbool_t pg, const float16_t *base) + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, half* address) => LoadVectorFirstFaulting(mask, address); + + + /// LoadVectorNonFaulting : Unextended load, non-faulting + + /// + /// svfloat16_t svldnf1[_f16](svbool_t pg, const float16_t *base) + /// + public static unsafe Vector LoadVectorNonFaulting(half* address) => LoadVectorNonFaulting(address); + + + /// LoadVectorNonTemporal : Unextended load, non-temporal + + /// + /// svfloat16_t svldnt1[_f16](svbool_t pg, const float16_t *base) + /// + public static unsafe Vector LoadVectorNonTemporal(Vector mask, half* address) => LoadVectorNonTemporal(mask, address); + + + /// LoadVectorx2 : Load two-element tuples into two vectors + + /// + /// svfloat16x2_t svld2[_f16](svbool_t pg, const float16_t *base) + /// + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, half* address) => LoadVectorx2(mask, address); + + + /// LoadVectorx3 : Load three-element tuples into three vectors + + /// + /// svfloat16x3_t svld3[_f16](svbool_t pg, const float16_t *base) + /// + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, half* address) => LoadVectorx3(mask, address); + + + /// LoadVectorx4 : Load four-element tuples into four vectors + + /// + /// svfloat16x4_t svld4[_f16](svbool_t pg, const float16_t *base) + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, half* address) => LoadVectorx4(mask, address); + + + /// Log2 : Base 2 logarithm as integer + + /// + /// svint16_t svlogb[_f16]_m(svint16_t inactive, svbool_t pg, svfloat16_t op) + /// svint16_t svlogb[_f16]_x(svbool_t pg, svfloat16_t op) + /// svint16_t svlogb[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector Log2(Vector value) => Log2(value); + + + /// Max : Maximum + + /// + /// svfloat16_t svmax[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svmax[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svmax[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector Max(Vector left, Vector right) => Max(left, right); + + + /// MaxAcross : Maximum reduction to scalar + + /// + /// float16_t svmaxv[_f16](svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector MaxAcross(Vector value) => MaxAcross(value); + + + /// MaxNumber : Maximum number + + /// + /// svfloat16_t svmaxnm[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svmaxnm[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svmaxnm[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector MaxNumber(Vector left, Vector right) => MaxNumber(left, right); + + + /// MaxNumberAcross : Maximum number reduction to scalar + + /// + /// float16_t svmaxnmv[_f16](svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector MaxNumberAcross(Vector value) => MaxNumberAcross(value); + + + /// MaxNumberPairwise : Maximum number pairwise + + /// + /// svfloat16_t 
svmaxnmp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svmaxnmp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector MaxNumberPairwise(Vector left, Vector right) => MaxNumberPairwise(left, right); + + + /// MaxPairwise : Maximum pairwise + + /// + /// svfloat16_t svmaxp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svmaxp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) => MaxPairwise(left, right); + + + /// Min : Minimum + + /// + /// svfloat16_t svmin[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svmin[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svmin[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector Min(Vector left, Vector right) => Min(left, right); + + + /// MinAcross : Minimum reduction to scalar + + /// + /// float16_t svminv[_f16](svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector MinAcross(Vector value) => MinAcross(value); + + + /// MinNumber : Minimum number + + /// + /// svfloat16_t svminnm[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svminnm[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svminnm[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector MinNumber(Vector left, Vector right) => MinNumber(left, right); + + + /// MinNumberAcross : Minimum number reduction to scalar + + /// + /// float16_t svminnmv[_f16](svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector MinNumberAcross(Vector value) => MinNumberAcross(value); + + + /// MinNumberPairwise : Minimum number pairwise + + /// + /// svfloat16_t svminnmp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svminnmp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector MinNumberPairwise(Vector left, Vector right) => MinNumberPairwise(left, right); + + + /// MinPairwise : Minimum pairwise + + /// + /// svfloat16_t svminp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svminp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + + /// Multiply : Multiply + + /// + /// svfloat16_t svmul[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svmul[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svmul[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector Multiply(Vector left, Vector right) => Multiply(left, right); + + + + + + /// MultiplyAddRotateComplex : Complex multiply-add with rotate + + /// + /// svfloat16_t svcmla[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_rotation) + /// svfloat16_t svcmla[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_rotation) + /// svfloat16_t svcmla[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + + /// MultiplyAddRotateComplexBySelectedScalar : Complex multiply-add with rotate + + /// + /// svfloat16_t svcmla_lane[_f16](svfloat16_t 
op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) => MultiplyAddRotateComplexBySelectedScalar(addend, left, right, rightIndex, rotation); + + + /// MultiplyAddWideningLower : Multiply-add long (bottom) + + /// + /// svfloat32_t svmlalb[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningLower(op1, op2, op3); + + /// + /// svfloat32_t svmlalb_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplyAddWideningLower(op1, op2, op3, imm_index); + + + /// MultiplyAddWideningUpper : Multiply-add long (top) + + /// + /// svfloat32_t svmlalt[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningUpper(op1, op2, op3); + + /// + /// svfloat32_t svmlalt_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplyAddWideningUpper(op1, op2, op3, imm_index); + + + /// MultiplyBySelectedScalar : Multiply + + /// + /// svfloat16_t svmul_lane[_f16](svfloat16_t op1, svfloat16_t op2, uint64_t imm_index) + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex); + + + /// MultiplyExtended : Multiply extended (∞×0=2) + + /// + /// svfloat16_t svmulx[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svmulx[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// svfloat16_t svmulx[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector MultiplyExtended(Vector left, Vector right) => MultiplyExtended(left, right); + + + + + + /// MultiplySubtractWideningLower : Multiply-subtract long (bottom) + + /// + /// svfloat32_t svmlslb[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) => MultiplySubtractWideningLower(op1, op2, op3); + + /// + /// svfloat32_t svmlslb_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplySubtractWideningLower(op1, op2, op3, imm_index); + + + /// MultiplySubtractWideningUpper : Multiply-subtract long (top) + + /// + /// svfloat32_t svmlslt[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplySubtractWideningUpper(op1, op2, op3); + + /// + /// svfloat32_t svmlslt_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplySubtractWideningUpper(op1, op2, op3, imm_index); + + + /// Negate : Negate + + /// + /// svfloat16_t 
svneg[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// svfloat16_t svneg[_f16]_x(svbool_t pg, svfloat16_t op) + /// svfloat16_t svneg[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector Negate(Vector value) => Negate(value); + + + + + /// PopCount : Count nonzero bits + + /// + /// svuint16_t svcnt[_f16]_m(svuint16_t inactive, svbool_t pg, svfloat16_t op) + /// svuint16_t svcnt[_f16]_x(svbool_t pg, svfloat16_t op) + /// svuint16_t svcnt[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + + /// ReciprocalEstimate : Reciprocal estimate + + /// + /// svfloat16_t svrecpe[_f16](svfloat16_t op) + /// + public static unsafe Vector ReciprocalEstimate(Vector value) => ReciprocalEstimate(value); + + + /// ReciprocalExponent : Reciprocal exponent + + /// + /// svfloat16_t svrecpx[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// svfloat16_t svrecpx[_f16]_x(svbool_t pg, svfloat16_t op) + /// svfloat16_t svrecpx[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector ReciprocalExponent(Vector value) => ReciprocalExponent(value); + + + /// ReciprocalSqrtEstimate : Reciprocal square root estimate + + /// + /// svfloat16_t svrsqrte[_f16](svfloat16_t op) + /// + public static unsafe Vector ReciprocalSqrtEstimate(Vector value) => ReciprocalSqrtEstimate(value); + + + /// ReciprocalSqrtStep : Reciprocal square root step + + /// + /// svfloat16_t svrsqrts[_f16](svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector ReciprocalSqrtStep(Vector left, Vector right) => ReciprocalSqrtStep(left, right); + + + /// ReciprocalStep : Reciprocal step + + /// + /// svfloat16_t svrecps[_f16](svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector ReciprocalStep(Vector left, Vector right) => ReciprocalStep(left, right); + + + /// ReverseElement : Reverse all elements + + /// + /// svfloat16_t svrev[_f16](svfloat16_t op) + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + + /// RoundAwayFromZero : Round to nearest, ties away from zero + + /// + /// svfloat16_t svrinta[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// svfloat16_t svrinta[_f16]_x(svbool_t pg, svfloat16_t op) + /// svfloat16_t svrinta[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector RoundAwayFromZero(Vector value) => RoundAwayFromZero(value); + + + /// RoundToNearest : Round to nearest, ties to even + + /// + /// svfloat16_t svrintn[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// svfloat16_t svrintn[_f16]_x(svbool_t pg, svfloat16_t op) + /// svfloat16_t svrintn[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector RoundToNearest(Vector value) => RoundToNearest(value); + + + /// RoundToNegativeInfinity : Round towards -∞ + + /// + /// svfloat16_t svrintm[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// svfloat16_t svrintm[_f16]_x(svbool_t pg, svfloat16_t op) + /// svfloat16_t svrintm[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector RoundToNegativeInfinity(Vector value) => RoundToNegativeInfinity(value); + + + /// RoundToPositiveInfinity : Round towards +∞ + + /// + /// svfloat16_t svrintp[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// svfloat16_t svrintp[_f16]_x(svbool_t pg, svfloat16_t op) + /// svfloat16_t svrintp[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector RoundToPositiveInfinity(Vector value) => 
RoundToPositiveInfinity(value); + + + /// RoundToZero : Round towards zero + + /// + /// svfloat16_t svrintz[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// svfloat16_t svrintz[_f16]_x(svbool_t pg, svfloat16_t op) + /// svfloat16_t svrintz[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector RoundToZero(Vector value) => RoundToZero(value); + + + + + /// Scale : Adjust exponent + + /// + /// svfloat16_t svscale[_f16]_m(svbool_t pg, svfloat16_t op1, svint16_t op2) + /// svfloat16_t svscale[_f16]_x(svbool_t pg, svfloat16_t op1, svint16_t op2) + /// svfloat16_t svscale[_f16]_z(svbool_t pg, svfloat16_t op1, svint16_t op2) + /// + public static unsafe Vector Scale(Vector left, Vector right) => Scale(left, right); + + + /// Splice : Splice two vectors under predicate control + + /// + /// svfloat16_t svsplice[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + + /// Sqrt : Square root + + /// + /// svfloat16_t svsqrt[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// svfloat16_t svsqrt[_f16]_x(svbool_t pg, svfloat16_t op) + /// svfloat16_t svsqrt[_f16]_z(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector Sqrt(Vector value) => Sqrt(value); + + + /// Store : Non-truncating store + + /// + /// void svst1[_f16](svbool_t pg, float16_t *base, svfloat16_t data) + /// + public static unsafe void Store(Vector mask, half* address, Vector data) => Store(mask, address, data); + + /// + /// void svst2[_f16](svbool_t pg, float16_t *base, svfloat16x2_t data) + /// + public static unsafe void Store(Vector mask, half* address, (Vector Value1, Vector Value2) data) => Store(mask, address, data); + + /// + /// void svst3[_f16](svbool_t pg, float16_t *base, svfloat16x3_t data) + /// + public static unsafe void Store(Vector mask, half* address, (Vector Value1, Vector Value2, Vector Value3) data) => Store(mask, address, data); + + /// + /// void svst4[_f16](svbool_t pg, float16_t *base, svfloat16x4_t data) + /// + public static unsafe void Store(Vector mask, half* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) => Store(mask, address, data); + 
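+ // Editorial usage sketch (not generated output): the tuple Store overloads above correspond to svst2/svst3/svst4, which interleave their inputs element-wise in memory. Assuming SVE support at runtime and the placeholder 'half' element type used throughout this PR ('destination', 'real', 'imaginary' are hypothetical names): + // + //     if (SveFp16.IsSupported) + //     { + //         Vector<half> mask = SveFp16.CreateTrueMaskHalf(); + //         // writes real[0], imaginary[0], real[1], imaginary[1], ... starting at destination + //         SveFp16.Store(mask, destination, (real, imaginary)); + //     } + 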
op1, svfloat16_t op2, uint64_t imm3) + /// + public static unsafe Vector TrigonometricMultiplyAddCoefficient(Vector left, Vector right, [ConstantExpected] byte control) => TrigonometricMultiplyAddCoefficient(left, right, control); + + + /// TrigonometricSelectCoefficient : Trigonometric select coefficient + + /// + /// svfloat16_t svtssel[_f16](svfloat16_t op1, svuint16_t op2) + /// + public static unsafe Vector TrigonometricSelectCoefficient(Vector value, Vector selector) => TrigonometricSelectCoefficient(value, selector); + + + /// TrigonometricStartingValue : Trigonometric starting value + + /// + /// svfloat16_t svtsmul[_f16](svfloat16_t op1, svuint16_t op2) + /// + public static unsafe Vector TrigonometricStartingValue(Vector value, Vector sign) => TrigonometricStartingValue(value, sign); + + + /// UnzipEven : Concatenate even elements from two inputs + + /// + /// svfloat16_t svuzp1[_f16](svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) => UnzipEven(left, right); + + + /// UnzipOdd : Concatenate odd elements from two inputs + + /// + /// svfloat16_t svuzp2[_f16](svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) => UnzipOdd(left, right); + + + /// UpConvertWideningUpper : Up convert long (top) + + /// + /// svfloat32_t svcvtlt_f32[_f16]_m(svfloat32_t inactive, svbool_t pg, svfloat16_t op) + /// svfloat32_t svcvtlt_f32[_f16]_x(svbool_t pg, svfloat16_t op) + /// + public static unsafe Vector UpConvertWideningUpper(Vector value) => UpConvertWideningUpper(value); + + + /// VectorTableLookup : Table lookup in single-vector table + + /// + /// svfloat16_t svtbl[_f16](svfloat16_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) => VectorTableLookup(data, indices); + + /// + /// svfloat16_t svtbl2[_f16](svfloat16x2_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) data, Vector indices) => VectorTableLookup(data, indices); + + + /// VectorTableLookupExtension : Table lookup in single-vector table (merging) + + /// + /// svfloat16_t svtbx[_f16](svfloat16_t fallback, svfloat16_t data, svuint16_t indices) + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + + /// ZipHigh : Interleave elements from high halves of two inputs + + /// + /// svfloat16_t svzip2[_f16](svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector ZipHigh(Vector left, Vector right) => ZipHigh(left, right); + + + /// ZipLow : Interleave elements from low halves of two inputs + + /// + /// svfloat16_t svzip1[_f16](svfloat16_t op1, svfloat16_t op2) + /// + public static unsafe Vector ZipLow(Vector left, Vector right) => ZipLow(left, right); + + } +} + diff --git a/sve_api/out_cs_api/SveI8mm.PlatformNotSupported.cs b/sve_api/out_cs_api/SveI8mm.PlatformNotSupported.cs new file mode 100644 index 0000000000000..d2242eb8e4077 --- /dev/null +++ b/sve_api/out_cs_api/SveI8mm.PlatformNotSupported.cs @@ -0,0 +1,78 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license.
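+
+// NOTE (editorial sketch, not generator output): a minimal usage sketch for this
+// class. Names such as acc, signedBytes, and unsignedBytes are hypothetical, and
+// the bare "Vector" throughout this dump stands for the generic Vector<T>. Callers
+// are expected to guard the I8mm intrinsics behind the IsSupported check; this
+// PlatformNotSupported variant is what non-SVE targets compile against:
+//
+//     if (SveI8mm.IsSupported)
+//     {
+//         // widening 8-bit dot product accumulated into int32 lanes
+//         acc = SveI8mm.DotProductSignedUnsigned(acc, signedBytes, unsignedBytes);
+//     }
+//     else
+//     {
+//         // scalar fallback path
+//     }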
+ +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveI8mm : AdvSimd + { + internal SveI8mm() { } + + public static new bool IsSupported { get => false; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => false; } + } + + /// DotProductSignedUnsigned : Dot product (signed × unsigned) + + /// + /// svint32_t svsudot[_s32](svint32_t op1, svint8_t op2, svuint8_t op3) + /// + public static unsafe Vector DotProductSignedUnsigned(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svsudot_lane[_s32](svint32_t op1, svint8_t op2, svuint8_t op3, uint64_t imm_index) + /// + public static unsafe Vector DotProductSignedUnsigned(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// DotProductUnsignedSigned : Dot product (unsigned × signed) + + /// + /// svint32_t svusdot[_s32](svint32_t op1, svuint8_t op2, svint8_t op3) + /// + public static unsafe Vector DotProductUnsignedSigned(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svusdot_lane[_s32](svint32_t op1, svuint8_t op2, svint8_t op3, uint64_t imm_index) + /// + public static unsafe Vector DotProductUnsignedSigned(Vector op1, Vector op2, Vector op3, ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// MatrixMultiplyAccumulate : Matrix multiply-accumulate + + /// + /// svint32_t svmmla[_s32](svint32_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svmmla[_u32](svuint32_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + + /// MatrixMultiplyAccumulateUnsignedSigned : Matrix multiply-accumulate (unsigned × signed) + + /// + /// svint32_t svusmmla[_s32](svint32_t op1, svuint8_t op2, svint8_t op3) + /// + public static unsafe Vector MatrixMultiplyAccumulateUnsignedSigned(Vector op1, Vector op2, Vector op3) { throw new PlatformNotSupportedException(); } + + } +} + diff --git a/sve_api/out_cs_api/SveI8mm.System.Runtime.Intrinsics.cs b/sve_api/out_cs_api/SveI8mm.System.Runtime.Intrinsics.cs new file mode 100644 index 0000000000000..93cbab4369700 --- /dev/null +++ b/sve_api/out_cs_api/SveI8mm.System.Runtime.Intrinsics.cs @@ -0,0 +1,8 @@ + public static System.Numerics.Vector DotProductSignedUnsigned(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector DotProductSignedUnsigned(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index) { throw null; } + public static System.Numerics.Vector DotProductUnsignedSigned(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector DotProductUnsignedSigned(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3, ulong imm_index)
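+ // Editorial note (sketch, not generator output): the imm_index overloads map the
+ // svsudot_lane/svusdot_lane forms, where the immediate selects one 32-bit group
+ // of four bytes in the last vector operand that is then reused for every dot
+ // product in the vector, e.g. (hypothetical) DotProductUnsignedSigned(acc, u8s, s8s, 0).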
{ throw null; } + public static System.Numerics.Vector MatrixMultiplyAccumulate(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MatrixMultiplyAccumulate(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + public static System.Numerics.Vector MatrixMultiplyAccumulateUnsignedSigned(System.Numerics.Vector op1, System.Numerics.Vector op2, System.Numerics.Vector op3) { throw null; } + diff --git a/sve_api/out_cs_api/SveI8mm.cs b/sve_api/out_cs_api/SveI8mm.cs new file mode 100644 index 0000000000000..7903bdd4d304f --- /dev/null +++ b/sve_api/out_cs_api/SveI8mm.cs @@ -0,0 +1,78 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveI8mm : AdvSimd + { + internal SveI8mm() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// DotProductSignedUnsigned : Dot product (signed × unsigned) + + /// + /// svint32_t svsudot[_s32](svint32_t op1, svint8_t op2, svuint8_t op3) + /// + public static unsafe Vector DotProductSignedUnsigned(Vector op1, Vector op2, Vector op3) => DotProductSignedUnsigned(op1, op2, op3); + + /// + /// svint32_t svsudot_lane[_s32](svint32_t op1, svint8_t op2, svuint8_t op3, uint64_t imm_index) + /// + public static unsafe Vector DotProductSignedUnsigned(Vector op1, Vector op2, Vector op3, ulong imm_index) => DotProductSignedUnsigned(op1, op2, op3, imm_index); + + + /// DotProductUnsignedSigned : Dot product (unsigned × signed) + + /// + /// svint32_t svusdot[_s32](svint32_t op1, svuint8_t op2, svint8_t op3) + /// + public static unsafe Vector DotProductUnsignedSigned(Vector op1, Vector op2, Vector op3) => DotProductUnsignedSigned(op1, op2, op3); + + /// + /// svint32_t svusdot_lane[_s32](svint32_t op1, svuint8_t op2, svint8_t op3, uint64_t imm_index) + /// + public static unsafe Vector DotProductUnsignedSigned(Vector op1, Vector op2, Vector op3, ulong imm_index) => DotProductUnsignedSigned(op1, op2, op3, imm_index); + + + /// MatrixMultiplyAccumulate : Matrix multiply-accumulate + + /// + /// svint32_t svmmla[_s32](svint32_t op1, svint8_t op2, svint8_t op3) + /// + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3) => MatrixMultiplyAccumulate(op1, op2, op3); + + /// + /// svuint32_t svmmla[_u32](svuint32_t op1, svuint8_t op2, svuint8_t op3) + /// + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3) => MatrixMultiplyAccumulate(op1, op2, op3); + + + /// MatrixMultiplyAccumulateUnsignedSigned : Matrix multiply-accumulate (unsigned × signed) + + /// + /// svint32_t svusmmla[_s32](svint32_t op1, svuint8_t op2, svint8_t op3) + /// + public static unsafe Vector MatrixMultiplyAccumulateUnsignedSigned(Vector op1, Vector op2, Vector op3) => MatrixMultiplyAccumulateUnsignedSigned(op1, op2, op3); + + } +} + diff --git a/sve_api/out_cs_api/SveNone.PlatformNotSupported.cs 
b/sve_api/out_cs_api/SveNone.PlatformNotSupported.cs new file mode 100644 index 0000000000000..3bcf486914c49 --- /dev/null +++ b/sve_api/out_cs_api/SveNone.PlatformNotSupported.cs @@ -0,0 +1,1656 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveNone : AdvSimd + { + internal SveNone() { } + + public static new bool IsSupported { get => false; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => false; } + } + + /// ChangeOneVectorInATupleOfFourVectors : Change one vector in a tuple of four vectors + + /// + /// svint8x4_t svset4[_s8](svint8x4_t tuple, uint64_t imm_index, svint8_t x) + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svint16x4_t svset4[_s16](svint16x4_t tuple, uint64_t imm_index, svint16_t x) + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svint32x4_t svset4[_s32](svint32x4_t tuple, uint64_t imm_index, svint32_t x) + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svint64x4_t svset4[_s64](svint64x4_t tuple, uint64_t imm_index, svint64_t x) + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8x4_t svset4[_u8](svuint8x4_t tuple, uint64_t imm_index, svuint8_t x) + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16x4_t svset4[_u16](svuint16x4_t tuple, uint64_t imm_index, svuint16_t x) + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32x4_t svset4[_u32](svuint32x4_t tuple, uint64_t imm_index, svuint32_t x) + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64x4_t svset4[_u64](svuint64x4_t tuple, uint64_t imm_index, svuint64_t x) + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2,
Vector tuple3, Vector tuple4), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16x4_t svset4[_bf16](svbfloat16x4_t tuple, uint64_t imm_index, svbfloat16_t x) + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16x4_t svset4[_f16](svfloat16x4_t tuple, uint64_t imm_index, svfloat16_t x) + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32x4_t svset4[_f32](svfloat32x4_t tuple, uint64_t imm_index, svfloat32_t x) + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64x4_t svset4[_f64](svfloat64x4_t tuple, uint64_t imm_index, svfloat64_t x) + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + + /// ChangeOneVectorInATupleOfThreeVectors : Change one vector in a tuple of three vectors + + /// + /// svint8x3_t svset3[_s8](svint8x3_t tuple, uint64_t imm_index, svint8_t x) + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svint16x3_t svset3[_s16](svint16x3_t tuple, uint64_t imm_index, svint16_t x) + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svint32x3_t svset3[_s32](svint32x3_t tuple, uint64_t imm_index, svint32_t x) + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svint64x3_t svset3[_s64](svint64x3_t tuple, uint64_t imm_index, svint64_t x) + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8x3_t svset3[_u8](svuint8x3_t tuple, uint64_t imm_index, svuint8_t x) + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16x3_t svset3[_u16](svuint16x3_t tuple, uint64_t imm_index, svuint16_t x) + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32x3_t svset3[_u32](svuint32x3_t tuple, uint64_t imm_index, svuint32_t x) + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector 
tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64x3_t svset3[_u64](svuint64x3_t tuple, uint64_t imm_index, svuint64_t x) + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16x3_t svset3[_bf16](svbfloat16x3_t tuple, uint64_t imm_index, svbfloat16_t x) + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16x3_t svset3[_f16](svfloat16x3_t tuple, uint64_t imm_index, svfloat16_t x) + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32x3_t svset3[_f32](svfloat32x3_t tuple, uint64_t imm_index, svfloat32_t x) + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64x3_t svset3[_f64](svfloat64x3_t tuple, uint64_t imm_index, svfloat64_t x) + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + + /// ChangeOneVectorInATupleOfTwoVectors : Change one vector in a tuple of two vectors + + /// + /// svint8x2_t svset2[_s8](svint8x2_t tuple, uint64_t imm_index, svint8_t x) + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svint16x2_t svset2[_s16](svint16x2_t tuple, uint64_t imm_index, svint16_t x) + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svint32x2_t svset2[_s32](svint32x2_t tuple, uint64_t imm_index, svint32_t x) + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svint64x2_t svset2[_s64](svint64x2_t tuple, uint64_t imm_index, svint64_t x) + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8x2_t svset2[_u8](svuint8x2_t tuple, uint64_t imm_index, svuint8_t x) + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16x2_t svset2[_u16](svuint16x2_t tuple, uint64_t imm_index, svuint16_t x) + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32x2_t svset2[_u32](svuint32x2_t tuple, uint64_t imm_index, svuint32_t x) + /// + public static unsafe 
(Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64x2_t svset2[_u64](svuint64x2_t tuple, uint64_t imm_index, svuint64_t x) + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16x2_t svset2[_bf16](svbfloat16x2_t tuple, uint64_t imm_index, svbfloat16_t x) + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16x2_t svset2[_f16](svfloat16x2_t tuple, uint64_t imm_index, svfloat16_t x) + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32x2_t svset2[_f32](svfloat32x2_t tuple, uint64_t imm_index, svfloat32_t x) + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64x2_t svset2[_f64](svfloat64x2_t tuple, uint64_t imm_index, svfloat64_t x) + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) { throw new PlatformNotSupportedException(); } + + + /// CreateATupleOfFourVectors : Create a tuple of four vectors + + /// + /// svint8x4_t svcreate4[_s8](svint8_t x0, svint8_t x1, svint8_t x2, svint8_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) { throw new PlatformNotSupportedException(); } + + /// + /// svint16x4_t svcreate4[_s16](svint16_t x0, svint16_t x1, svint16_t x2, svint16_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) { throw new PlatformNotSupportedException(); } + + /// + /// svint32x4_t svcreate4[_s32](svint32_t x0, svint32_t x1, svint32_t x2, svint32_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) { throw new PlatformNotSupportedException(); } + + /// + /// svint64x4_t svcreate4[_s64](svint64_t x0, svint64_t x1, svint64_t x2, svint64_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8x4_t svcreate4[_u8](svuint8_t x0, svuint8_t x1, svuint8_t x2, svuint8_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16x4_t svcreate4[_u16](svuint16_t x0, svuint16_t x1, svuint16_t x2, svuint16_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32x4_t svcreate4[_u32](svuint32_t x0, svuint32_t x1, svuint32_t x2, svuint32_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) { 
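+ // Editorial sketch (hypothetical names, not generator output): svcreate4 simply
+ // packages four same-typed vectors into a tuple for the multi-vector forms, e.g.
+ //     var quad = SveNone.CreateATupleOfFourVectors(v0, v1, v2, v3);
+ // which can then feed an svst4-backed Store(mask, address, quad) overload.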
throw new PlatformNotSupportedException(); } + + /// + /// svuint64x4_t svcreate4[_u64](svuint64_t x0, svuint64_t x1, svuint64_t x2, svuint64_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16x4_t svcreate4[_bf16](svbfloat16_t x0, svbfloat16_t x1, svbfloat16_t x2, svbfloat16_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16x4_t svcreate4[_f16](svfloat16_t x0, svfloat16_t x1, svfloat16_t x2, svfloat16_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32x4_t svcreate4[_f32](svfloat32_t x0, svfloat32_t x1, svfloat32_t x2, svfloat32_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64x4_t svcreate4[_f64](svfloat64_t x0, svfloat64_t x1, svfloat64_t x2, svfloat64_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) { throw new PlatformNotSupportedException(); } + + + /// CreateATupleOfThreeVectors : Create a tuple of three vectors + + /// + /// svint8x3_t svcreate3[_s8](svint8_t x0, svint8_t x1, svint8_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) { throw new PlatformNotSupportedException(); } + + /// + /// svint16x3_t svcreate3[_s16](svint16_t x0, svint16_t x1, svint16_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) { throw new PlatformNotSupportedException(); } + + /// + /// svint32x3_t svcreate3[_s32](svint32_t x0, svint32_t x1, svint32_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) { throw new PlatformNotSupportedException(); } + + /// + /// svint64x3_t svcreate3[_s64](svint64_t x0, svint64_t x1, svint64_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8x3_t svcreate3[_u8](svuint8_t x0, svuint8_t x1, svuint8_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16x3_t svcreate3[_u16](svuint16_t x0, svuint16_t x1, svuint16_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32x3_t svcreate3[_u32](svuint32_t x0, svuint32_t x1, svuint32_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64x3_t svcreate3[_u64](svuint64_t x0, svuint64_t x1, svuint64_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16x3_t 
svcreate3[_bf16](svbfloat16_t x0, svbfloat16_t x1, svbfloat16_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16x3_t svcreate3[_f16](svfloat16_t x0, svfloat16_t x1, svfloat16_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32x3_t svcreate3[_f32](svfloat32_t x0, svfloat32_t x1, svfloat32_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64x3_t svcreate3[_f64](svfloat64_t x0, svfloat64_t x1, svfloat64_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) { throw new PlatformNotSupportedException(); } + + + /// CreateATupleOfTwoVectors : Create a tuple of two vectors + + /// + /// svint8x2_t svcreate2[_s8](svint8_t x0, svint8_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) { throw new PlatformNotSupportedException(); } + + /// + /// svint16x2_t svcreate2[_s16](svint16_t x0, svint16_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) { throw new PlatformNotSupportedException(); } + + /// + /// svint32x2_t svcreate2[_s32](svint32_t x0, svint32_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) { throw new PlatformNotSupportedException(); } + + /// + /// svint64x2_t svcreate2[_s64](svint64_t x0, svint64_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8x2_t svcreate2[_u8](svuint8_t x0, svuint8_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16x2_t svcreate2[_u16](svuint16_t x0, svuint16_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32x2_t svcreate2[_u32](svuint32_t x0, svuint32_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64x2_t svcreate2[_u64](svuint64_t x0, svuint64_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16x2_t svcreate2[_bf16](svbfloat16_t x0, svbfloat16_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16x2_t svcreate2[_f16](svfloat16_t x0, svfloat16_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32x2_t svcreate2[_f32](svfloat32_t x0, svfloat32_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64x2_t svcreate2[_f64](svfloat64_t x0, svfloat64_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, 
Vector x1) { throw new PlatformNotSupportedException(); } + + + /// CreateAnUninitializedTupleOfFourVectors : Create an uninitialized tuple of four vectors + + /// + /// svint8x4_t svundef4_s8() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svint16x4_t svundef4_s16() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svint32x4_t svundef4_s32() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svint64x4_t svundef4_s64() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svuint8x4_t svundef4_u8() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svuint16x4_t svundef4_u16() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svuint32x4_t svundef4_u32() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svuint64x4_t svundef4_u64() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16x4_t svundef4_bf16() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16x4_t svundef4_f16() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32x4_t svundef4_f32() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64x4_t svundef4_f64() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() { throw new PlatformNotSupportedException(); } + + + /// CreateAnUninitializedTupleOfThreeVectors : Create an uninitialized tuple of three vectors + + /// + /// svint8x3_t svundef3_s8() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svint16x3_t svundef3_s16() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svint32x3_t svundef3_s32() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svint64x3_t svundef3_s64() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svuint8x3_t svundef3_u8() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svuint16x3_t svundef3_u16() + /// + public 
static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svuint32x3_t svundef3_u32() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svuint64x3_t svundef3_u64() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16x3_t svundef3_bf16() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16x3_t svundef3_f16() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32x3_t svundef3_f32() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64x3_t svundef3_f64() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() { throw new PlatformNotSupportedException(); } + + + /// CreateAnUninitializedTupleOfTwoVectors : Create an uninitialized tuple of two vectors + + /// + /// svint8x2_t svundef2_s8() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svint16x2_t svundef2_s16() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svint32x2_t svundef2_s32() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svint64x2_t svundef2_s64() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svuint8x2_t svundef2_u8() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svuint16x2_t svundef2_u16() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svuint32x2_t svundef2_u32() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svuint64x2_t svundef2_u64() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16x2_t svundef2_bf16() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16x2_t svundef2_f16() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32x2_t svundef2_f32() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64x2_t svundef2_f64() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() { throw new PlatformNotSupportedException(); } + + + /// CreateAnUninitializedVector : Create an uninitialized vector + + /// + /// svint8_t 
svundef_s8() + /// + public static unsafe Vector CreateAnUninitializedVector() { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svundef_s16() + /// + public static unsafe Vector CreateAnUninitializedVector() { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svundef_s32() + /// + public static unsafe Vector CreateAnUninitializedVector() { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svundef_s64() + /// + public static unsafe Vector CreateAnUninitializedVector() { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svundef_u8() + /// + public static unsafe Vector CreateAnUninitializedVector() { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svundef_u16() + /// + public static unsafe Vector CreateAnUninitializedVector() { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svundef_u32() + /// + public static unsafe Vector CreateAnUninitializedVector() { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svundef_u64() + /// + public static unsafe Vector CreateAnUninitializedVector() { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svundef_bf16() + /// + public static unsafe Vector CreateAnUninitializedVector() { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svundef_f16() + /// + public static unsafe Vector CreateAnUninitializedVector() { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svundef_f32() + /// + public static unsafe Vector CreateAnUninitializedVector() { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svundef_f64() + /// + public static unsafe Vector CreateAnUninitializedVector() { throw new PlatformNotSupportedException(); } + + + /// DuplicateSelectedScalarToVector : Broadcast a quadword of scalars + + /// + /// svint8_t svdupq[_n]_s8(int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7, int8_t x8, int8_t x9, int8_t x10, int8_t x11, int8_t x12, int8_t x13, int8_t x14, int8_t x15) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(sbyte x0, sbyte x1, sbyte x2, sbyte x3, sbyte x4, sbyte x5, sbyte x6, sbyte x7, sbyte x8, sbyte x9, sbyte x10, sbyte x11, sbyte x12, sbyte x13, sbyte x14, sbyte x15) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svdupq[_n]_s16(int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(short x0, short x1, short x2, short x3, short x4, short x5, short x6, short x7) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svdupq[_n]_s32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(int x0, int x1, int x2, int x3) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svdupq[_n]_s64(int64_t x0, int64_t x1) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(long x0, long x1) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svdupq[_n]_u8(uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7, uint8_t x8, uint8_t x9, uint8_t x10, uint8_t x11, uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(byte x0, byte x1, byte x2, byte x3, byte x4, byte x5, byte x6, byte x7, byte x8, byte x9, byte x10, byte x11, byte x12, byte x13, byte x14, byte x15) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svdupq[_n]_b8(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7, bool x8, bool x9, bool x10, bool x11, bool x12, bool x13, bool x14, bool x15) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7, bool x8, bool x9, bool x10, bool x11, bool x12, bool x13, bool x14, bool x15) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svdupq[_n]_u16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(ushort x0, ushort x1, ushort x2, ushort x3, ushort x4, ushort x5, ushort x6, ushort x7) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svdupq[_n]_b16(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svdupq[_n]_u32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(uint x0, uint x1, uint x2, uint x3) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svdupq[_n]_b32(bool x0, bool x1, bool x2, bool x3) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, bool x1, bool x2, bool x3) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svdupq[_n]_u64(uint64_t x0, uint64_t x1) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(ulong x0, ulong x1) { throw new PlatformNotSupportedException(); } + + /// + /// svbool_t svdupq[_n]_b64(bool x0, bool x1) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, bool x1) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svdupq[_n]_bf16(bfloat16_t x0, bfloat16_t x1, bfloat16_t x2, bfloat16_t x3, bfloat16_t x4, bfloat16_t x5, bfloat16_t x6, bfloat16_t x7) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bfloat16 x0, bfloat16 x1, bfloat16 x2, bfloat16 x3, bfloat16 x4, bfloat16 x5, bfloat16 x6, bfloat16 x7) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svdupq[_n]_f16(float16_t x0, float16_t x1, float16_t x2, float16_t x3, float16_t x4, float16_t x5, float16_t x6, float16_t x7) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(half x0, half x1, half x2, half x3, half x4, half x5, half x6, half x7) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svdupq[_n]_f32(float32_t x0, float32_t x1, float32_t x2, float32_t x3) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(float x0, float x1, float x2, float x3) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svdupq[_n]_f64(float64_t x0, float64_t x1) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(double x0, double x1) { throw new PlatformNotSupportedException();
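+ // Editorial note (sketch): svdupq broadcasts one 128-bit quadword of scalars
+ // across the whole scalable vector, so the two doubles above repeat per 128-bit
+ // granule; e.g. DuplicateSelectedScalarToVector(1.0, 2.0) yields
+ // { 1.0, 2.0, 1.0, 2.0, ... } for the full vector length.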
} + + + /// ExtractOneVectorFromATupleOfFourVectors : Extract one vector from a tuple of four vectors + + /// + /// svint8_t svget4[_s8](svint8x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svget4[_s16](svint16x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svget4[_s32](svint32x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svget4[_s64](svint64x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svget4[_u8](svuint8x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svget4[_u16](svuint16x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svget4[_u32](svuint32x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svget4[_u64](svuint64x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svget4[_bf16](svbfloat16x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svget4[_f16](svfloat16x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svget4[_f32](svfloat32x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svget4[_f64](svfloat64x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// ExtractOneVectorFromATupleOfThreeVectors : Extract one vector from a tuple of three 
vectors + + /// + /// svint8_t svget3[_s8](svint8x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svget3[_s16](svint16x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svget3[_s32](svint32x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svget3[_s64](svint64x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svget3[_u8](svuint8x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svget3[_u16](svuint16x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svget3[_u32](svuint32x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svget3[_u64](svuint64x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svget3[_bf16](svbfloat16x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svget3[_f16](svfloat16x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svget3[_f32](svfloat32x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svget3[_f64](svfloat64x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// ExtractOneVectorFromATupleOfTwoVectors : Extract one vector from a tuple of two vectors + + /// + /// svint8_t svget2[_s8](svint8x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// 
svint16_t svget2[_s16](svint16x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svget2[_s32](svint32x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svget2[_s64](svint64x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svget2[_u8](svuint8x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svget2[_u16](svuint16x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svget2[_u32](svuint32x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svget2[_u64](svuint64x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svget2[_bf16](svbfloat16x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svget2[_f16](svfloat16x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svget2[_f32](svfloat32x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svget2[_f64](svfloat64x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) { throw new PlatformNotSupportedException(); } + + + /// ReinterpretVectorContents : Reinterpret vector contents + + /// + /// svint8_t svreinterpret_s8[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint8_t svreinterpret_s8[_f16](svfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint8_t svreinterpret_s8[_f32](svfloat32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint8_t svreinterpret_s8[_f64](svfloat64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// 
svint8_t svreinterpret_s8[_s8](svint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint8_t svreinterpret_s8[_s16](svint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint8_t svreinterpret_s8[_s32](svint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint8_t svreinterpret_s8[_s64](svint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint8_t svreinterpret_s8[_u8](svuint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint8_t svreinterpret_s8[_u16](svuint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint8_t svreinterpret_s8[_u32](svuint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint8_t svreinterpret_s8[_u64](svuint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svreinterpret_s16[_s8](svint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svreinterpret_s16[_s32](svint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svreinterpret_s16[_s64](svint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svreinterpret_s16[_u8](svuint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svreinterpret_s16[_u16](svuint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svreinterpret_s16[_u32](svuint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svreinterpret_s16[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svreinterpret_s16[_f16](svfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svreinterpret_s16[_f32](svfloat32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svreinterpret_s16[_f64](svfloat64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svreinterpret_s16[_s16](svint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) { throw new PlatformNotSupportedException(); } + + /// + /// svint16_t svreinterpret_s16[_u64](svuint64_t 
/// + /// svint32_t svreinterpret_s32[_s8](svint8_t op) + /// + public static unsafe Vector<int> ReinterpretVectorContents(Vector<sbyte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svreinterpret_s32[_s16](svint16_t op) + /// + public static unsafe Vector<int> ReinterpretVectorContents(Vector<short> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svreinterpret_s32[_s64](svint64_t op) + /// + public static unsafe Vector<int> ReinterpretVectorContents(Vector<long> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svreinterpret_s32[_u8](svuint8_t op) + /// + public static unsafe Vector<int> ReinterpretVectorContents(Vector<byte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svreinterpret_s32[_u16](svuint16_t op) + /// + public static unsafe Vector<int> ReinterpretVectorContents(Vector<ushort> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svreinterpret_s32[_u32](svuint32_t op) + /// + public static unsafe Vector<int> ReinterpretVectorContents(Vector<uint> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svreinterpret_s32[_u64](svuint64_t op) + /// + public static unsafe Vector<int> ReinterpretVectorContents(Vector<ulong> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svreinterpret_s32[_bf16](svbfloat16_t op) + /// + public static unsafe Vector<int> ReinterpretVectorContents(Vector<bfloat16> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svreinterpret_s32[_f16](svfloat16_t op) + /// + public static unsafe Vector<int> ReinterpretVectorContents(Vector<half> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svreinterpret_s32[_f32](svfloat32_t op) + /// + public static unsafe Vector<int> ReinterpretVectorContents(Vector<float> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svreinterpret_s32[_f64](svfloat64_t op) + /// + public static unsafe Vector<int> ReinterpretVectorContents(Vector<double> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint32_t svreinterpret_s32[_s32](svint32_t op) + /// + public static unsafe Vector<int> ReinterpretVectorContents(Vector<int> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svreinterpret_s64[_s8](svint8_t op) + /// + public static unsafe Vector<long> ReinterpretVectorContents(Vector<sbyte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svreinterpret_s64[_s16](svint16_t op) + /// + public static unsafe Vector<long> ReinterpretVectorContents(Vector<short> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svreinterpret_s64[_s32](svint32_t op) + /// + public static unsafe Vector<long> ReinterpretVectorContents(Vector<int> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svreinterpret_s64[_u8](svuint8_t op) + /// + public static unsafe Vector<long> ReinterpretVectorContents(Vector<byte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svreinterpret_s64[_u16](svuint16_t op) + /// + public static unsafe Vector<long> ReinterpretVectorContents(Vector<ushort> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svreinterpret_s64[_u32](svuint32_t op) + /// + public static unsafe Vector<long> ReinterpretVectorContents(Vector<uint> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svreinterpret_s64[_u64](svuint64_t op) + /// + public static unsafe Vector<long> ReinterpretVectorContents(Vector<ulong> value) { throw new PlatformNotSupportedException(); } + +
/// + /// svint64_t svreinterpret_s64[_bf16](svbfloat16_t op) + /// + public static unsafe Vector<long> ReinterpretVectorContents(Vector<bfloat16> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svreinterpret_s64[_f16](svfloat16_t op) + /// + public static unsafe Vector<long> ReinterpretVectorContents(Vector<half> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svreinterpret_s64[_f32](svfloat32_t op) + /// + public static unsafe Vector<long> ReinterpretVectorContents(Vector<float> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svreinterpret_s64[_f64](svfloat64_t op) + /// + public static unsafe Vector<long> ReinterpretVectorContents(Vector<double> value) { throw new PlatformNotSupportedException(); } + + /// + /// svint64_t svreinterpret_s64[_s64](svint64_t op) + /// + public static unsafe Vector<long> ReinterpretVectorContents(Vector<long> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svreinterpret_u8[_s8](svint8_t op) + /// + public static unsafe Vector<byte> ReinterpretVectorContents(Vector<sbyte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svreinterpret_u8[_s16](svint16_t op) + /// + public static unsafe Vector<byte> ReinterpretVectorContents(Vector<short> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svreinterpret_u8[_s32](svint32_t op) + /// + public static unsafe Vector<byte> ReinterpretVectorContents(Vector<int> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svreinterpret_u8[_s64](svint64_t op) + /// + public static unsafe Vector<byte> ReinterpretVectorContents(Vector<long> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svreinterpret_u8[_u16](svuint16_t op) + /// + public static unsafe Vector<byte> ReinterpretVectorContents(Vector<ushort> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svreinterpret_u8[_u32](svuint32_t op) + /// + public static unsafe Vector<byte> ReinterpretVectorContents(Vector<uint> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svreinterpret_u8[_u64](svuint64_t op) + /// + public static unsafe Vector<byte> ReinterpretVectorContents(Vector<ulong> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svreinterpret_u8[_bf16](svbfloat16_t op) + /// + public static unsafe Vector<byte> ReinterpretVectorContents(Vector<bfloat16> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svreinterpret_u8[_f16](svfloat16_t op) + /// + public static unsafe Vector<byte> ReinterpretVectorContents(Vector<half> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svreinterpret_u8[_f32](svfloat32_t op) + /// + public static unsafe Vector<byte> ReinterpretVectorContents(Vector<float> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svreinterpret_u8[_f64](svfloat64_t op) + /// + public static unsafe Vector<byte> ReinterpretVectorContents(Vector<double> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint8_t svreinterpret_u8[_u8](svuint8_t op) + /// + public static unsafe Vector<byte> ReinterpretVectorContents(Vector<byte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svreinterpret_u16[_s8](svint8_t op) + /// + public static unsafe Vector<ushort> ReinterpretVectorContents(Vector<sbyte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svreinterpret_u16[_s16](svint16_t op) + /// + public static unsafe Vector<ushort> ReinterpretVectorContents(Vector<short> value) { throw new PlatformNotSupportedException(); } + +
/// + /// svuint16_t svreinterpret_u16[_s32](svint32_t op) + /// + public static unsafe Vector<ushort> ReinterpretVectorContents(Vector<int> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svreinterpret_u16[_s64](svint64_t op) + /// + public static unsafe Vector<ushort> ReinterpretVectorContents(Vector<long> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svreinterpret_u16[_u8](svuint8_t op) + /// + public static unsafe Vector<ushort> ReinterpretVectorContents(Vector<byte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svreinterpret_u16[_u32](svuint32_t op) + /// + public static unsafe Vector<ushort> ReinterpretVectorContents(Vector<uint> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svreinterpret_u16[_u64](svuint64_t op) + /// + public static unsafe Vector<ushort> ReinterpretVectorContents(Vector<ulong> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svreinterpret_u16[_bf16](svbfloat16_t op) + /// + public static unsafe Vector<ushort> ReinterpretVectorContents(Vector<bfloat16> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svreinterpret_u16[_f16](svfloat16_t op) + /// + public static unsafe Vector<ushort> ReinterpretVectorContents(Vector<half> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svreinterpret_u16[_f32](svfloat32_t op) + /// + public static unsafe Vector<ushort> ReinterpretVectorContents(Vector<float> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svreinterpret_u16[_f64](svfloat64_t op) + /// + public static unsafe Vector<ushort> ReinterpretVectorContents(Vector<double> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint16_t svreinterpret_u16[_u16](svuint16_t op) + /// + public static unsafe Vector<ushort> ReinterpretVectorContents(Vector<ushort> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svreinterpret_u32[_s8](svint8_t op) + /// + public static unsafe Vector<uint> ReinterpretVectorContents(Vector<sbyte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svreinterpret_u32[_s16](svint16_t op) + /// + public static unsafe Vector<uint> ReinterpretVectorContents(Vector<short> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svreinterpret_u32[_s32](svint32_t op) + /// + public static unsafe Vector<uint> ReinterpretVectorContents(Vector<int> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svreinterpret_u32[_s64](svint64_t op) + /// + public static unsafe Vector<uint> ReinterpretVectorContents(Vector<long> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svreinterpret_u32[_u8](svuint8_t op) + /// + public static unsafe Vector<uint> ReinterpretVectorContents(Vector<byte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svreinterpret_u32[_u16](svuint16_t op) + /// + public static unsafe Vector<uint> ReinterpretVectorContents(Vector<ushort> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svreinterpret_u32[_u64](svuint64_t op) + /// + public static unsafe Vector<uint> ReinterpretVectorContents(Vector<ulong> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svreinterpret_u32[_bf16](svbfloat16_t op) + /// + public static unsafe Vector<uint> ReinterpretVectorContents(Vector<bfloat16> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svreinterpret_u32[_f16](svfloat16_t op) + /// + public static unsafe Vector<uint> ReinterpretVectorContents(Vector<half> value) { throw new PlatformNotSupportedException(); } + +
/// + /// svuint32_t svreinterpret_u32[_f32](svfloat32_t op) + /// + public static unsafe Vector<uint> ReinterpretVectorContents(Vector<float> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svreinterpret_u32[_f64](svfloat64_t op) + /// + public static unsafe Vector<uint> ReinterpretVectorContents(Vector<double> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint32_t svreinterpret_u32[_u32](svuint32_t op) + /// + public static unsafe Vector<uint> ReinterpretVectorContents(Vector<uint> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svreinterpret_u64[_s8](svint8_t op) + /// + public static unsafe Vector<ulong> ReinterpretVectorContents(Vector<sbyte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svreinterpret_u64[_s16](svint16_t op) + /// + public static unsafe Vector<ulong> ReinterpretVectorContents(Vector<short> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svreinterpret_u64[_s32](svint32_t op) + /// + public static unsafe Vector<ulong> ReinterpretVectorContents(Vector<int> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svreinterpret_u64[_s64](svint64_t op) + /// + public static unsafe Vector<ulong> ReinterpretVectorContents(Vector<long> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svreinterpret_u64[_u8](svuint8_t op) + /// + public static unsafe Vector<ulong> ReinterpretVectorContents(Vector<byte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svreinterpret_u64[_u16](svuint16_t op) + /// + public static unsafe Vector<ulong> ReinterpretVectorContents(Vector<ushort> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svreinterpret_u64[_u32](svuint32_t op) + /// + public static unsafe Vector<ulong> ReinterpretVectorContents(Vector<uint> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svreinterpret_u64[_bf16](svbfloat16_t op) + /// + public static unsafe Vector<ulong> ReinterpretVectorContents(Vector<bfloat16> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svreinterpret_u64[_f16](svfloat16_t op) + /// + public static unsafe Vector<ulong> ReinterpretVectorContents(Vector<half> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svreinterpret_u64[_f32](svfloat32_t op) + /// + public static unsafe Vector<ulong> ReinterpretVectorContents(Vector<float> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svreinterpret_u64[_f64](svfloat64_t op) + /// + public static unsafe Vector<ulong> ReinterpretVectorContents(Vector<double> value) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svreinterpret_u64[_u64](svuint64_t op) + /// + public static unsafe Vector<ulong> ReinterpretVectorContents(Vector<ulong> value) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svreinterpret_bf16[_bf16](svbfloat16_t op) + /// + public static unsafe Vector<bfloat16> ReinterpretVectorContents(Vector<bfloat16> value) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svreinterpret_bf16[_s8](svint8_t op) + /// + public static unsafe Vector<bfloat16> ReinterpretVectorContents(Vector<sbyte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svreinterpret_bf16[_s16](svint16_t op) + /// + public static unsafe Vector<bfloat16> ReinterpretVectorContents(Vector<short> value) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svreinterpret_bf16[_s32](svint32_t op) + /// + public static unsafe Vector<bfloat16> ReinterpretVectorContents(Vector<int> value) { throw new PlatformNotSupportedException(); } + +
/// + /// svbfloat16_t svreinterpret_bf16[_s64](svint64_t op) + /// + public static unsafe Vector<bfloat16> ReinterpretVectorContents(Vector<long> value) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svreinterpret_bf16[_u8](svuint8_t op) + /// + public static unsafe Vector<bfloat16> ReinterpretVectorContents(Vector<byte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svreinterpret_bf16[_u16](svuint16_t op) + /// + public static unsafe Vector<bfloat16> ReinterpretVectorContents(Vector<ushort> value) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svreinterpret_bf16[_u32](svuint32_t op) + /// + public static unsafe Vector<bfloat16> ReinterpretVectorContents(Vector<uint> value) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svreinterpret_bf16[_u64](svuint64_t op) + /// + public static unsafe Vector<bfloat16> ReinterpretVectorContents(Vector<ulong> value) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svreinterpret_bf16[_f16](svfloat16_t op) + /// + public static unsafe Vector<bfloat16> ReinterpretVectorContents(Vector<half> value) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svreinterpret_bf16[_f32](svfloat32_t op) + /// + public static unsafe Vector<bfloat16> ReinterpretVectorContents(Vector<float> value) { throw new PlatformNotSupportedException(); } + + /// + /// svbfloat16_t svreinterpret_bf16[_f64](svfloat64_t op) + /// + public static unsafe Vector<bfloat16> ReinterpretVectorContents(Vector<double> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svreinterpret_f16[_s8](svint8_t op) + /// + public static unsafe Vector<half> ReinterpretVectorContents(Vector<sbyte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svreinterpret_f16[_s16](svint16_t op) + /// + public static unsafe Vector<half> ReinterpretVectorContents(Vector<short> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svreinterpret_f16[_s32](svint32_t op) + /// + public static unsafe Vector<half> ReinterpretVectorContents(Vector<int> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svreinterpret_f16[_u8](svuint8_t op) + /// + public static unsafe Vector<half> ReinterpretVectorContents(Vector<byte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svreinterpret_f16[_u16](svuint16_t op) + /// + public static unsafe Vector<half> ReinterpretVectorContents(Vector<ushort> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svreinterpret_f16[_u32](svuint32_t op) + /// + public static unsafe Vector<half> ReinterpretVectorContents(Vector<uint> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svreinterpret_f16[_bf16](svbfloat16_t op) + /// + public static unsafe Vector<half> ReinterpretVectorContents(Vector<bfloat16> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svreinterpret_f16[_f16](svfloat16_t op) + /// + public static unsafe Vector<half> ReinterpretVectorContents(Vector<half> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svreinterpret_f16[_s64](svint64_t op) + /// + public static unsafe Vector<half> ReinterpretVectorContents(Vector<long> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svreinterpret_f16[_u64](svuint64_t op) + /// + public static unsafe Vector<half> ReinterpretVectorContents(Vector<ulong> value) { throw new PlatformNotSupportedException(); } + +
/// + /// svfloat16_t svreinterpret_f16[_f32](svfloat32_t op) + /// + public static unsafe Vector<half> ReinterpretVectorContents(Vector<float> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat16_t svreinterpret_f16[_f64](svfloat64_t op) + /// + public static unsafe Vector<half> ReinterpretVectorContents(Vector<double> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svreinterpret_f32[_s8](svint8_t op) + /// + public static unsafe Vector<float> ReinterpretVectorContents(Vector<sbyte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svreinterpret_f32[_s16](svint16_t op) + /// + public static unsafe Vector<float> ReinterpretVectorContents(Vector<short> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svreinterpret_f32[_s32](svint32_t op) + /// + public static unsafe Vector<float> ReinterpretVectorContents(Vector<int> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svreinterpret_f32[_s64](svint64_t op) + /// + public static unsafe Vector<float> ReinterpretVectorContents(Vector<long> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svreinterpret_f32[_u8](svuint8_t op) + /// + public static unsafe Vector<float> ReinterpretVectorContents(Vector<byte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svreinterpret_f32[_u16](svuint16_t op) + /// + public static unsafe Vector<float> ReinterpretVectorContents(Vector<ushort> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svreinterpret_f32[_u32](svuint32_t op) + /// + public static unsafe Vector<float> ReinterpretVectorContents(Vector<uint> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svreinterpret_f32[_u64](svuint64_t op) + /// + public static unsafe Vector<float> ReinterpretVectorContents(Vector<ulong> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svreinterpret_f32[_bf16](svbfloat16_t op) + /// + public static unsafe Vector<float> ReinterpretVectorContents(Vector<bfloat16> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svreinterpret_f32[_f16](svfloat16_t op) + /// + public static unsafe Vector<float> ReinterpretVectorContents(Vector<half> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svreinterpret_f32[_f32](svfloat32_t op) + /// + public static unsafe Vector<float> ReinterpretVectorContents(Vector<float> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat32_t svreinterpret_f32[_f64](svfloat64_t op) + /// + public static unsafe Vector<float> ReinterpretVectorContents(Vector<double> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svreinterpret_f64[_bf16](svbfloat16_t op) + /// + public static unsafe Vector<double> ReinterpretVectorContents(Vector<bfloat16> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svreinterpret_f64[_f16](svfloat16_t op) + /// + public static unsafe Vector<double> ReinterpretVectorContents(Vector<half> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svreinterpret_f64[_f32](svfloat32_t op) + /// + public static unsafe Vector<double> ReinterpretVectorContents(Vector<float> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svreinterpret_f64[_f64](svfloat64_t op) + /// + public static unsafe Vector<double> ReinterpretVectorContents(Vector<double> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svreinterpret_f64[_s8](svint8_t op) + /// + public static unsafe Vector<double> ReinterpretVectorContents(Vector<sbyte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svreinterpret_f64[_s16](svint16_t op) + /// + public static unsafe Vector<double> ReinterpretVectorContents(Vector<short> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svreinterpret_f64[_s32](svint32_t op) + /// + public static unsafe Vector<double> ReinterpretVectorContents(Vector<int> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svreinterpret_f64[_s64](svint64_t op) + /// + public static unsafe Vector<double> ReinterpretVectorContents(Vector<long> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svreinterpret_f64[_u8](svuint8_t op) + /// + public static unsafe Vector<double> ReinterpretVectorContents(Vector<byte> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svreinterpret_f64[_u16](svuint16_t op) + /// + public static unsafe Vector<double> ReinterpretVectorContents(Vector<ushort> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svreinterpret_f64[_u32](svuint32_t op) + /// + public static unsafe Vector<double> ReinterpretVectorContents(Vector<uint> value) { throw new PlatformNotSupportedException(); } + + /// + /// svfloat64_t svreinterpret_f64[_u64](svuint64_t op) + /// + public static unsafe Vector<double> ReinterpretVectorContents(Vector<ulong> value) { throw new PlatformNotSupportedException(); } + + } +} +
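The svreinterpret group that closes this file is a pure bit-pattern view change, never a numeric conversion. The same lane-level effect is already observable through the existing System.Numerics.Vector.AsVector* helpers; a small runnable sketch of the expected behaviour:

    using System.Numerics;

    class ReinterpretDemo
    {
        static void Main()
        {
            // Bit-level view change, not a numeric conversion: 1.0f reads back as 0x3F800000.
            Vector<float> ones = new Vector<float>(1.0f);
            Vector<int> bits = Vector.AsVectorInt32(ones); // existing System.Numerics helper
            System.Console.WriteLine(bits[0].ToString("X8")); // 3F800000 in every lane
        }
    }

An implemented ReinterpretVectorContents from Vector<float> to Vector<int> would be expected to produce the same lanes.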
diff --git a/sve_api/out_cs_api/SveNone.System.Runtime.Intrinsics.cs b/sve_api/out_cs_api/SveNone.System.Runtime.Intrinsics.cs new file mode 100644 index 0000000000000..036f9200d2680 --- /dev/null +++ b/sve_api/out_cs_api/SveNone.System.Runtime.Intrinsics.cs @@ -0,0 +1,317 @@ + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong
imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, 
System.Numerics.Vector) ChangeOneVectorInATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) 
ChangeOneVectorInATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) ChangeOneVectorInATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index, System.Numerics.Vector x) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfFourVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2, System.Numerics.Vector x3) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfFourVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2, System.Numerics.Vector x3) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfFourVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2, System.Numerics.Vector x3) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfFourVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2, System.Numerics.Vector x3) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfFourVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2, System.Numerics.Vector x3) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfFourVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2, System.Numerics.Vector x3) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfFourVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2, System.Numerics.Vector x3) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfFourVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2, System.Numerics.Vector x3) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfFourVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2, System.Numerics.Vector x3) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) 
CreateATupleOfFourVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2, System.Numerics.Vector x3) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfFourVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2, System.Numerics.Vector x3) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfFourVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2, System.Numerics.Vector x3) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfThreeVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfThreeVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfThreeVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfThreeVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfThreeVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfThreeVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfThreeVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfThreeVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfThreeVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfThreeVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfThreeVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfThreeVectors(System.Numerics.Vector x0, System.Numerics.Vector x1, System.Numerics.Vector x2) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfTwoVectors(System.Numerics.Vector x0, System.Numerics.Vector x1) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfTwoVectors(System.Numerics.Vector x0, System.Numerics.Vector x1) { throw null; } + 
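These SveNone stubs mirror the ACLE svcreate2/svset2-style helpers: the Create* methods pack independent vectors into a tuple, and ChangeOneVectorInATupleOf* replaces the element picked by a constant index; the per-type overload listing continues below. A reference sketch of the semantics, using illustrative generic helpers rather than the generated per-element-type surface:

    using System.Numerics;

    class TupleDemo
    {
        // Illustrative reference behaviour for the tuple helpers declared here.
        static (Vector<T>, Vector<T>) CreateATupleOfTwoVectors<T>(Vector<T> x0, Vector<T> x1)
            where T : struct => (x0, x1);

        // A constant index selects which element of the tuple is replaced by x.
        static (Vector<T>, Vector<T>) ChangeOneVectorInATupleOfTwoVectors<T>(
            (Vector<T> tuple1, Vector<T> tuple2) tuple, ulong imm_index, Vector<T> x)
            where T : struct
            => imm_index == 0 ? (x, tuple.tuple2) : (tuple.tuple1, x);

        static void Main()
        {
            var pair = CreateATupleOfTwoVectors(new Vector<int>(1), new Vector<int>(2));
            pair = ChangeOneVectorInATupleOfTwoVectors(pair, 1, new Vector<int>(9));
            System.Console.WriteLine(pair.Item2[0]); // 9
        }
    }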
public static (System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfTwoVectors(System.Numerics.Vector x0, System.Numerics.Vector x1) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfTwoVectors(System.Numerics.Vector x0, System.Numerics.Vector x1) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfTwoVectors(System.Numerics.Vector x0, System.Numerics.Vector x1) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfTwoVectors(System.Numerics.Vector x0, System.Numerics.Vector x1) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfTwoVectors(System.Numerics.Vector x0, System.Numerics.Vector x1) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfTwoVectors(System.Numerics.Vector x0, System.Numerics.Vector x1) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfTwoVectors(System.Numerics.Vector x0, System.Numerics.Vector x1) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfTwoVectors(System.Numerics.Vector x0, System.Numerics.Vector x1) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfTwoVectors(System.Numerics.Vector x0, System.Numerics.Vector x1) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateATupleOfTwoVectors(System.Numerics.Vector x0, System.Numerics.Vector x1) { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfFourVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfFourVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfFourVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfFourVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfFourVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfFourVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfFourVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfFourVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfFourVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfFourVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfFourVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, 
System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfFourVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfThreeVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfThreeVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfThreeVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfThreeVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfThreeVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfThreeVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfThreeVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfThreeVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfThreeVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfThreeVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfThreeVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfThreeVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfTwoVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfTwoVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfTwoVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfTwoVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfTwoVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfTwoVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfTwoVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfTwoVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfTwoVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfTwoVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfTwoVectors() { throw null; } + public static (System.Numerics.Vector, System.Numerics.Vector) CreateAnUninitializedTupleOfTwoVectors() { throw null; } + public static System.Numerics.Vector CreateAnUninitializedVector() { throw null; } + public static System.Numerics.Vector CreateAnUninitializedVector() { throw null; } + public static System.Numerics.Vector 
CreateAnUninitializedVector() { throw null; } + public static System.Numerics.Vector CreateAnUninitializedVector() { throw null; } + public static System.Numerics.Vector CreateAnUninitializedVector() { throw null; } + public static System.Numerics.Vector CreateAnUninitializedVector() { throw null; } + public static System.Numerics.Vector CreateAnUninitializedVector() { throw null; } + public static System.Numerics.Vector CreateAnUninitializedVector() { throw null; } + public static System.Numerics.Vector CreateAnUninitializedVector() { throw null; } + public static System.Numerics.Vector CreateAnUninitializedVector() { throw null; } + public static System.Numerics.Vector CreateAnUninitializedVector() { throw null; } + public static System.Numerics.Vector CreateAnUninitializedVector() { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(sbyte x0, [ConstantExpected] byte index, sbyte x2, sbyte x3, sbyte x4, sbyte x5, sbyte x6, sbyte x7, sbyte x8, sbyte x9, sbyte x10, sbyte x11, sbyte x12, sbyte x13, sbyte x14, sbyte x15) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(short x0, [ConstantExpected] byte index, short x2, short x3, short x4, short x5, short x6, short x7) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(int x0, [ConstantExpected] byte index, int x2, int x3) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(long x0, [ConstantExpected] byte index) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(byte x0, [ConstantExpected] byte index, byte x2, byte x3, byte x4, byte x5, byte x6, byte x7, byte x8, byte x9, byte x10, byte x11, byte x12, byte x13, byte x14, byte x15) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(bool x0, [ConstantExpected] byte index, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7, bool x8, bool x9, bool x10, bool x11, bool x12, bool x13, bool x14, bool x15) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(ushort x0, [ConstantExpected] byte index, ushort x2, ushort x3, ushort x4, ushort x5, ushort x6, ushort x7) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(bool x0, [ConstantExpected] byte index, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(uint x0, [ConstantExpected] byte index, uint x2, uint x3) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(bool x0, [ConstantExpected] byte index, bool x2, bool x3) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(ulong x0, [ConstantExpected] byte index) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(bool x0, [ConstantExpected] byte index) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(bfloat16 x0, [ConstantExpected] byte index, bfloat16 x2, bfloat16 x3, bfloat16 x4, bfloat16 x5, bfloat16 x6, bfloat16 x7) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(half x0, [ConstantExpected] byte index, half x2, half x3, half x4, half x5, half x6, half x7) { throw null; } + public static System.Numerics.Vector DuplicateSelectedScalarToVector(float x0, [ConstantExpected] byte index, float x2, float x3) { throw null; } + public static 
System.Numerics.Vector DuplicateSelectedScalarToVector(double x0, [ConstantExpected] byte index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfFourVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3, System.Numerics.Vector tuple4), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector 
tuple3), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfThreeVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2, System.Numerics.Vector tuple3), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index) { throw null; } + public static System.Numerics.Vector 
ExtractOneVectorFromATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index) { throw null; } + public static System.Numerics.Vector ExtractOneVectorFromATupleOfTwoVectors((System.Numerics.Vector tuple1, System.Numerics.Vector tuple2), ulong imm_index) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector 
ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector 
ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector 
ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector 
ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + public static System.Numerics.Vector ReinterpretVectorContents(System.Numerics.Vector value) { throw null; } + diff --git a/sve_api/out_cs_api/SveNone.cs b/sve_api/out_cs_api/SveNone.cs new file mode 100644 index 0000000000000..4c4500dc4021e --- /dev/null +++ b/sve_api/out_cs_api/SveNone.cs @@ -0,0 +1,1656 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
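+ // Illustrative usage sketch (editorial note, not generator output): SveNone maps the SVE tuple
+ // intrinsics (svcreate2/3/4, svset2/3/4, svget2/3/4, svundef*) onto value tuples of Vector<T>;
+ // the generated method bodies below are self-referential placeholders, not implementations.
+ // Assuming SVE support and two hypothetical vectors a and b, the intended calling pattern is:
+ //     (Vector<int>, Vector<int>) pair = SveNone.CreateATupleOfTwoVectors(a, b);          // svcreate2
+ //     (Vector<int>, Vector<int>) set  = SveNone.ChangeOneVectorInATupleOfTwoVectors(pair, 1, b); // svset2
+ //     Vector<int> first = SveNone.ExtractOneVectorFromATupleOfTwoVectors(set, 0);        // svget2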
+ +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveNone : AdvSimd + { + internal SveNone() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } +
+ /// ChangeOneVectorInATupleOfFourVectors : Change one vector in a tuple of four vectors +
+ /// + /// svint8x4_t svset4[_s8](svint8x4_t tuple, uint64_t imm_index, svint8_t x) + /// + public static unsafe (Vector<sbyte>, Vector<sbyte>, Vector<sbyte>, Vector<sbyte>) ChangeOneVectorInATupleOfFourVectors((Vector<sbyte> tuple1, Vector<sbyte> tuple2, Vector<sbyte> tuple3, Vector<sbyte> tuple4) tuple, ulong imm_index, Vector<sbyte> x) => ChangeOneVectorInATupleOfFourVectors(tuple, imm_index, x); +
+ /// + /// svint16x4_t svset4[_s16](svint16x4_t tuple, uint64_t imm_index, svint16_t x) + /// + public static unsafe (Vector<short>, Vector<short>, Vector<short>, Vector<short>) ChangeOneVectorInATupleOfFourVectors((Vector<short> tuple1, Vector<short> tuple2, Vector<short> tuple3, Vector<short> tuple4) tuple, ulong imm_index, Vector<short> x) => ChangeOneVectorInATupleOfFourVectors(tuple, imm_index, x); +
+ /// + /// svint32x4_t svset4[_s32](svint32x4_t tuple, uint64_t imm_index, svint32_t x) + /// + public static unsafe (Vector<int>, Vector<int>, Vector<int>, Vector<int>) ChangeOneVectorInATupleOfFourVectors((Vector<int> tuple1, Vector<int> tuple2, Vector<int> tuple3, Vector<int> tuple4) tuple, ulong imm_index, Vector<int> x) => ChangeOneVectorInATupleOfFourVectors(tuple, imm_index, x); +
+ /// + /// svint64x4_t svset4[_s64](svint64x4_t tuple, uint64_t imm_index, svint64_t x) + /// + public static unsafe (Vector<long>, Vector<long>, Vector<long>, Vector<long>) ChangeOneVectorInATupleOfFourVectors((Vector<long> tuple1, Vector<long> tuple2, Vector<long> tuple3, Vector<long> tuple4) tuple, ulong imm_index, Vector<long> x) => ChangeOneVectorInATupleOfFourVectors(tuple, imm_index, x); +
+ /// + /// svuint8x4_t svset4[_u8](svuint8x4_t tuple, uint64_t imm_index, svuint8_t x) + /// + public static unsafe (Vector<byte>, Vector<byte>, Vector<byte>, Vector<byte>) ChangeOneVectorInATupleOfFourVectors((Vector<byte> tuple1, Vector<byte> tuple2, Vector<byte> tuple3, Vector<byte> tuple4) tuple, ulong imm_index, Vector<byte> x) => ChangeOneVectorInATupleOfFourVectors(tuple, imm_index, x); +
+ /// + /// svuint16x4_t svset4[_u16](svuint16x4_t tuple, uint64_t imm_index, svuint16_t x) + /// + public static unsafe (Vector<ushort>, Vector<ushort>, Vector<ushort>, Vector<ushort>) ChangeOneVectorInATupleOfFourVectors((Vector<ushort> tuple1, Vector<ushort> tuple2, Vector<ushort> tuple3, Vector<ushort> tuple4) tuple, ulong imm_index, Vector<ushort> x) => ChangeOneVectorInATupleOfFourVectors(tuple, imm_index, x); +
+ /// + /// svuint32x4_t svset4[_u32](svuint32x4_t tuple, uint64_t imm_index, svuint32_t x) + /// + public static unsafe (Vector<uint>, Vector<uint>, Vector<uint>, Vector<uint>) ChangeOneVectorInATupleOfFourVectors((Vector<uint> tuple1, Vector<uint> tuple2, Vector<uint> tuple3, Vector<uint> tuple4) tuple, ulong imm_index, Vector<uint> x) => ChangeOneVectorInATupleOfFourVectors(tuple, imm_index, x); +
+ /// + /// svuint64x4_t svset4[_u64](svuint64x4_t tuple, uint64_t imm_index, svuint64_t x) + /// + public static unsafe (Vector<ulong>, Vector<ulong>, Vector<ulong>, Vector<ulong>) ChangeOneVectorInATupleOfFourVectors((Vector<ulong> tuple1, Vector<ulong> tuple2, Vector<ulong> tuple3, Vector<ulong> tuple4) tuple, ulong imm_index, Vector<ulong> x) => ChangeOneVectorInATupleOfFourVectors(tuple, imm_index, x); +
+ /// + /// svbfloat16x4_t svset4[_bf16](svbfloat16x4_t tuple, uint64_t imm_index, svbfloat16_t x) + /// + public static unsafe (Vector<bfloat16>, Vector<bfloat16>, Vector<bfloat16>, Vector<bfloat16>) ChangeOneVectorInATupleOfFourVectors((Vector<bfloat16> tuple1, Vector<bfloat16> tuple2, Vector<bfloat16> tuple3, Vector<bfloat16> tuple4) tuple, ulong imm_index, Vector<bfloat16> x) => ChangeOneVectorInATupleOfFourVectors(tuple, imm_index, x); +
+ /// + /// svfloat16x4_t svset4[_f16](svfloat16x4_t tuple, uint64_t imm_index, svfloat16_t x) + /// + public static unsafe (Vector<half>, Vector<half>, Vector<half>, Vector<half>) ChangeOneVectorInATupleOfFourVectors((Vector<half> tuple1, Vector<half> tuple2, Vector<half> tuple3, Vector<half> tuple4) tuple, ulong imm_index, Vector<half> x) => ChangeOneVectorInATupleOfFourVectors(tuple, imm_index, x); +
+ /// + /// svfloat32x4_t svset4[_f32](svfloat32x4_t tuple, uint64_t imm_index, svfloat32_t x) + /// + public static unsafe (Vector<float>, Vector<float>, Vector<float>, Vector<float>) ChangeOneVectorInATupleOfFourVectors((Vector<float> tuple1, Vector<float> tuple2, Vector<float> tuple3, Vector<float> tuple4) tuple, ulong imm_index, Vector<float> x) => ChangeOneVectorInATupleOfFourVectors(tuple, imm_index, x); +
+ /// + /// svfloat64x4_t svset4[_f64](svfloat64x4_t tuple, uint64_t imm_index, svfloat64_t x) + /// + public static unsafe (Vector<double>, Vector<double>, Vector<double>, Vector<double>) ChangeOneVectorInATupleOfFourVectors((Vector<double> tuple1, Vector<double> tuple2, Vector<double> tuple3, Vector<double> tuple4) tuple, ulong imm_index, Vector<double> x) => ChangeOneVectorInATupleOfFourVectors(tuple, imm_index, x); +
+ /// ChangeOneVectorInATupleOfThreeVectors : Change one vector in a tuple of three vectors +
+ /// + /// svint8x3_t svset3[_s8](svint8x3_t tuple, uint64_t imm_index, svint8_t x) + /// + public static unsafe (Vector<sbyte>, Vector<sbyte>, Vector<sbyte>) ChangeOneVectorInATupleOfThreeVectors((Vector<sbyte> tuple1, Vector<sbyte> tuple2, Vector<sbyte> tuple3) tuple, ulong imm_index, Vector<sbyte> x) => ChangeOneVectorInATupleOfThreeVectors(tuple, imm_index, x); +
+ /// + /// svint16x3_t svset3[_s16](svint16x3_t tuple, uint64_t imm_index, svint16_t x) + /// + public static unsafe (Vector<short>, Vector<short>, Vector<short>) ChangeOneVectorInATupleOfThreeVectors((Vector<short> tuple1, Vector<short> tuple2, Vector<short> tuple3) tuple, ulong imm_index, Vector<short> x) => ChangeOneVectorInATupleOfThreeVectors(tuple, imm_index, x); +
+ /// + /// svint32x3_t svset3[_s32](svint32x3_t tuple, uint64_t imm_index, svint32_t x) + /// + public static unsafe (Vector<int>, Vector<int>, Vector<int>) ChangeOneVectorInATupleOfThreeVectors((Vector<int> tuple1, Vector<int> tuple2, Vector<int> tuple3) tuple, ulong imm_index, Vector<int> x) => ChangeOneVectorInATupleOfThreeVectors(tuple, imm_index, x); +
+ /// + /// svint64x3_t svset3[_s64](svint64x3_t tuple, uint64_t imm_index, svint64_t x) + /// + public static unsafe (Vector<long>, Vector<long>, Vector<long>) ChangeOneVectorInATupleOfThreeVectors((Vector<long> tuple1, Vector<long> tuple2, Vector<long> tuple3) tuple, ulong imm_index, Vector<long> x) => ChangeOneVectorInATupleOfThreeVectors(tuple, imm_index, x); +
+ /// + /// svuint8x3_t svset3[_u8](svuint8x3_t tuple, uint64_t imm_index, svuint8_t x) + /// + public static unsafe (Vector<byte>, Vector<byte>, Vector<byte>) ChangeOneVectorInATupleOfThreeVectors((Vector<byte> tuple1, Vector<byte> tuple2, Vector<byte> tuple3) tuple, ulong imm_index, Vector<byte> x) => ChangeOneVectorInATupleOfThreeVectors(tuple, imm_index, x); +
+ /// + /// svuint16x3_t svset3[_u16](svuint16x3_t tuple, uint64_t imm_index, svuint16_t x) + /// + public static unsafe (Vector<ushort>, Vector<ushort>, Vector<ushort>) ChangeOneVectorInATupleOfThreeVectors((Vector<ushort> tuple1, Vector<ushort> tuple2, Vector<ushort> tuple3) tuple, ulong imm_index, Vector<ushort> x) => ChangeOneVectorInATupleOfThreeVectors(tuple, imm_index, x); +
+ /// + /// svuint32x3_t svset3[_u32](svuint32x3_t tuple, uint64_t imm_index, svuint32_t x) + /// + public static unsafe (Vector<uint>, Vector<uint>, Vector<uint>) ChangeOneVectorInATupleOfThreeVectors((Vector<uint> tuple1, Vector<uint> tuple2, Vector<uint> tuple3) tuple, ulong imm_index, Vector<uint> x) => ChangeOneVectorInATupleOfThreeVectors(tuple, imm_index, x); +
+ /// + /// svuint64x3_t svset3[_u64](svuint64x3_t tuple, uint64_t imm_index, svuint64_t x) + /// + public static unsafe (Vector<ulong>, Vector<ulong>, Vector<ulong>) ChangeOneVectorInATupleOfThreeVectors((Vector<ulong> tuple1, Vector<ulong> tuple2, Vector<ulong> tuple3) tuple, ulong imm_index, Vector<ulong> x) => ChangeOneVectorInATupleOfThreeVectors(tuple, imm_index, x); +
+ /// + /// svbfloat16x3_t svset3[_bf16](svbfloat16x3_t tuple, uint64_t imm_index, svbfloat16_t x) + /// + public static unsafe (Vector<bfloat16>, Vector<bfloat16>, Vector<bfloat16>) ChangeOneVectorInATupleOfThreeVectors((Vector<bfloat16> tuple1, Vector<bfloat16> tuple2, Vector<bfloat16> tuple3) tuple, ulong imm_index, Vector<bfloat16> x) => ChangeOneVectorInATupleOfThreeVectors(tuple, imm_index, x); +
+ /// + /// svfloat16x3_t svset3[_f16](svfloat16x3_t tuple, uint64_t imm_index, svfloat16_t x) + /// + public static unsafe (Vector<half>, Vector<half>, Vector<half>) ChangeOneVectorInATupleOfThreeVectors((Vector<half> tuple1, Vector<half> tuple2, Vector<half> tuple3) tuple, ulong imm_index, Vector<half> x) => ChangeOneVectorInATupleOfThreeVectors(tuple, imm_index, x); +
+ /// + /// svfloat32x3_t svset3[_f32](svfloat32x3_t tuple, uint64_t imm_index, svfloat32_t x) + /// + public static unsafe (Vector<float>, Vector<float>, Vector<float>) ChangeOneVectorInATupleOfThreeVectors((Vector<float> tuple1, Vector<float> tuple2, Vector<float> tuple3) tuple, ulong imm_index, Vector<float> x) => ChangeOneVectorInATupleOfThreeVectors(tuple, imm_index, x); +
+ /// + /// svfloat64x3_t svset3[_f64](svfloat64x3_t tuple, uint64_t imm_index, svfloat64_t x) + /// + public static unsafe (Vector<double>, Vector<double>, Vector<double>) ChangeOneVectorInATupleOfThreeVectors((Vector<double> tuple1, Vector<double> tuple2, Vector<double> tuple3) tuple, ulong imm_index, Vector<double> x) => ChangeOneVectorInATupleOfThreeVectors(tuple, imm_index, x); +
+ /// ChangeOneVectorInATupleOfTwoVectors : Change one vector in a tuple of two vectors +
+ /// + /// svint8x2_t svset2[_s8](svint8x2_t tuple, uint64_t imm_index, svint8_t x) + /// + public static unsafe (Vector<sbyte>, Vector<sbyte>) ChangeOneVectorInATupleOfTwoVectors((Vector<sbyte> tuple1, Vector<sbyte> tuple2) tuple, ulong imm_index, Vector<sbyte> x) => ChangeOneVectorInATupleOfTwoVectors(tuple, imm_index, x); +
+ /// + /// svint16x2_t svset2[_s16](svint16x2_t tuple, uint64_t imm_index, svint16_t x) + /// + public static unsafe (Vector<short>, Vector<short>) ChangeOneVectorInATupleOfTwoVectors((Vector<short> tuple1, Vector<short> tuple2) tuple, ulong imm_index, Vector<short> x) => ChangeOneVectorInATupleOfTwoVectors(tuple, imm_index, x); +
+ /// + /// svint32x2_t svset2[_s32](svint32x2_t tuple, uint64_t imm_index, svint32_t x) + /// + public static unsafe (Vector<int>, Vector<int>) ChangeOneVectorInATupleOfTwoVectors((Vector<int> tuple1, Vector<int> tuple2) tuple, ulong imm_index, Vector<int> x) => ChangeOneVectorInATupleOfTwoVectors(tuple, imm_index, x); +
+ /// + /// svint64x2_t svset2[_s64](svint64x2_t tuple, uint64_t imm_index, svint64_t x) + /// + public static unsafe (Vector<long>, Vector<long>) ChangeOneVectorInATupleOfTwoVectors((Vector<long> tuple1, Vector<long> tuple2) tuple, ulong imm_index, Vector<long> x) => ChangeOneVectorInATupleOfTwoVectors(tuple, imm_index, x); +
+ /// + /// svuint8x2_t svset2[_u8](svuint8x2_t tuple, uint64_t imm_index, svuint8_t x) + /// + public static unsafe (Vector<byte>, Vector<byte>) ChangeOneVectorInATupleOfTwoVectors((Vector<byte> tuple1, Vector<byte> tuple2) tuple, ulong imm_index, Vector<byte> x) => ChangeOneVectorInATupleOfTwoVectors(tuple, imm_index, x); +
+ /// + /// svuint16x2_t svset2[_u16](svuint16x2_t tuple, uint64_t imm_index, svuint16_t x) + /// + public static unsafe (Vector<ushort>, Vector<ushort>) ChangeOneVectorInATupleOfTwoVectors((Vector<ushort> tuple1, Vector<ushort> tuple2) tuple, ulong imm_index, Vector<ushort> x) => ChangeOneVectorInATupleOfTwoVectors(tuple, imm_index, x); +
+ /// + /// svuint32x2_t svset2[_u32](svuint32x2_t tuple, uint64_t imm_index, svuint32_t x) + /// + public static unsafe (Vector<uint>, Vector<uint>) ChangeOneVectorInATupleOfTwoVectors((Vector<uint> tuple1, Vector<uint> tuple2) tuple, ulong imm_index, Vector<uint> x) => ChangeOneVectorInATupleOfTwoVectors(tuple, imm_index, x); +
+ /// + /// svuint64x2_t svset2[_u64](svuint64x2_t tuple, uint64_t imm_index, svuint64_t x) + /// + public static unsafe (Vector<ulong>, Vector<ulong>) ChangeOneVectorInATupleOfTwoVectors((Vector<ulong> tuple1, Vector<ulong> tuple2) tuple, ulong imm_index, Vector<ulong> x) => ChangeOneVectorInATupleOfTwoVectors(tuple, imm_index, x); +
+ /// + /// svbfloat16x2_t svset2[_bf16](svbfloat16x2_t tuple, uint64_t imm_index, svbfloat16_t x) + /// + public static unsafe (Vector<bfloat16>, Vector<bfloat16>) ChangeOneVectorInATupleOfTwoVectors((Vector<bfloat16> tuple1, Vector<bfloat16> tuple2) tuple, ulong imm_index, Vector<bfloat16> x) => ChangeOneVectorInATupleOfTwoVectors(tuple, imm_index, x); +
+ /// + /// svfloat16x2_t svset2[_f16](svfloat16x2_t tuple, uint64_t imm_index, svfloat16_t x) + /// + public static unsafe (Vector<half>, Vector<half>) ChangeOneVectorInATupleOfTwoVectors((Vector<half> tuple1, Vector<half> tuple2) tuple, ulong imm_index, Vector<half> x) => ChangeOneVectorInATupleOfTwoVectors(tuple, imm_index, x); +
+ /// + /// svfloat32x2_t svset2[_f32](svfloat32x2_t tuple, uint64_t imm_index, svfloat32_t x) + /// + public static unsafe (Vector<float>, Vector<float>) ChangeOneVectorInATupleOfTwoVectors((Vector<float> tuple1, Vector<float> tuple2) tuple, ulong imm_index, Vector<float> x) => ChangeOneVectorInATupleOfTwoVectors(tuple, imm_index, x); +
+ /// + /// svfloat64x2_t svset2[_f64](svfloat64x2_t tuple, uint64_t imm_index, svfloat64_t x) + /// + public static unsafe (Vector<double>, Vector<double>) ChangeOneVectorInATupleOfTwoVectors((Vector<double> tuple1, Vector<double> tuple2) tuple, ulong imm_index, Vector<double> x) => ChangeOneVectorInATupleOfTwoVectors(tuple, imm_index, x); +
+ /// CreateATupleOfFourVectors : Create a tuple of four vectors +
+ /// + /// svint8x4_t svcreate4[_s8](svint8_t x0, svint8_t x1, svint8_t x2, svint8_t x3) + /// + public static unsafe (Vector<sbyte>, Vector<sbyte>, Vector<sbyte>, Vector<sbyte>) CreateATupleOfFourVectors(Vector<sbyte> x0, Vector<sbyte> x1, Vector<sbyte> x2, Vector<sbyte> x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); +
+ /// + /// svint16x4_t svcreate4[_s16](svint16_t x0, svint16_t x1, svint16_t x2, svint16_t x3) + /// + public static unsafe (Vector<short>, Vector<short>, Vector<short>, Vector<short>) CreateATupleOfFourVectors(Vector<short> x0, Vector<short> x1, Vector<short> x2, Vector<short> x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); +
+ /// + /// svint32x4_t svcreate4[_s32](svint32_t x0, svint32_t x1, svint32_t x2, svint32_t x3) + /// + public static unsafe (Vector<int>, Vector<int>, Vector<int>, Vector<int>) CreateATupleOfFourVectors(Vector<int> x0, Vector<int> x1, Vector<int> x2, Vector<int> x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); +
+ /// + /// svint64x4_t svcreate4[_s64](svint64_t x0, svint64_t x1, svint64_t x2, svint64_t x3) + /// + public static unsafe (Vector<long>, Vector<long>, Vector<long>, Vector<long>) CreateATupleOfFourVectors(Vector<long> x0, Vector<long> x1, Vector<long> x2, Vector<long> x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); +
+ /// + /// svuint8x4_t svcreate4[_u8](svuint8_t x0, svuint8_t x1, svuint8_t x2, svuint8_t x3) + /// + public static unsafe (Vector<byte>, Vector<byte>, Vector<byte>, Vector<byte>) CreateATupleOfFourVectors(Vector<byte> x0, Vector<byte> x1, Vector<byte> x2, Vector<byte> x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); +
+ /// + /// svuint16x4_t svcreate4[_u16](svuint16_t x0, svuint16_t x1, svuint16_t x2, svuint16_t x3) + /// + public static unsafe (Vector<ushort>, Vector<ushort>, Vector<ushort>, Vector<ushort>) CreateATupleOfFourVectors(Vector<ushort> x0, Vector<ushort> x1, Vector<ushort> x2, Vector<ushort> x3) =>
CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svuint32x4_t svcreate4[_u32](svuint32_t x0, svuint32_t x1, svuint32_t x2, svuint32_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svuint64x4_t svcreate4[_u64](svuint64_t x0, svuint64_t x1, svuint64_t x2, svuint64_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svbfloat16x4_t svcreate4[_bf16](svbfloat16_t x0, svbfloat16_t x1, svbfloat16_t x2, svbfloat16_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svfloat16x4_t svcreate4[_f16](svfloat16_t x0, svfloat16_t x1, svfloat16_t x2, svfloat16_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svfloat32x4_t svcreate4[_f32](svfloat32_t x0, svfloat32_t x1, svfloat32_t x2, svfloat32_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svfloat64x4_t svcreate4[_f64](svfloat64_t x0, svfloat64_t x1, svfloat64_t x2, svfloat64_t x3) + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + + /// CreateATupleOfThreeVectors : Create a tuple of three vectors + + /// + /// svint8x3_t svcreate3[_s8](svint8_t x0, svint8_t x1, svint8_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svint16x3_t svcreate3[_s16](svint16_t x0, svint16_t x1, svint16_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svint32x3_t svcreate3[_s32](svint32_t x0, svint32_t x1, svint32_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svint64x3_t svcreate3[_s64](svint64_t x0, svint64_t x1, svint64_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svuint8x3_t svcreate3[_u8](svuint8_t x0, svuint8_t x1, svuint8_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svuint16x3_t svcreate3[_u16](svuint16_t x0, svuint16_t x1, svuint16_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svuint32x3_t svcreate3[_u32](svuint32_t x0, svuint32_t x1, svuint32_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svuint64x3_t 
svcreate3[_u64](svuint64_t x0, svuint64_t x1, svuint64_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svbfloat16x3_t svcreate3[_bf16](svbfloat16_t x0, svbfloat16_t x1, svbfloat16_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svfloat16x3_t svcreate3[_f16](svfloat16_t x0, svfloat16_t x1, svfloat16_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svfloat32x3_t svcreate3[_f32](svfloat32_t x0, svfloat32_t x1, svfloat32_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svfloat64x3_t svcreate3[_f64](svfloat64_t x0, svfloat64_t x1, svfloat64_t x2) + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + + /// CreateATupleOfTwoVectors : Create a tuple of two vectors + + /// + /// svint8x2_t svcreate2[_s8](svint8_t x0, svint8_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svint16x2_t svcreate2[_s16](svint16_t x0, svint16_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svint32x2_t svcreate2[_s32](svint32_t x0, svint32_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svint64x2_t svcreate2[_s64](svint64_t x0, svint64_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svuint8x2_t svcreate2[_u8](svuint8_t x0, svuint8_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svuint16x2_t svcreate2[_u16](svuint16_t x0, svuint16_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svuint32x2_t svcreate2[_u32](svuint32_t x0, svuint32_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svuint64x2_t svcreate2[_u64](svuint64_t x0, svuint64_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svbfloat16x2_t svcreate2[_bf16](svbfloat16_t x0, svbfloat16_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svfloat16x2_t svcreate2[_f16](svfloat16_t x0, svfloat16_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svfloat32x2_t svcreate2[_f32](svfloat32_t x0, svfloat32_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svfloat64x2_t 
svcreate2[_f64](svfloat64_t x0, svfloat64_t x1) + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + + /// CreateAnUninitializedTupleOfFourVectors : Create an uninitialized tuple of four vectors + + /// + /// svint8x4_t svundef4_s8() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svint16x4_t svundef4_s16() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svint32x4_t svundef4_s32() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svint64x4_t svundef4_s64() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svuint8x4_t svundef4_u8() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svuint16x4_t svundef4_u16() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svuint32x4_t svundef4_u32() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svuint64x4_t svundef4_u64() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svbfloat16x4_t svundef4_bf16() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svfloat16x4_t svundef4_f16() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svfloat32x4_t svundef4_f32() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svfloat64x4_t svundef4_f64() + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + + /// CreateAnUninitializedTupleOfThreeVectors : Create an uninitialized tuple of three vectors + + /// + /// svint8x3_t svundef3_s8() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svint16x3_t svundef3_s16() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svint32x3_t svundef3_s32() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svint64x3_t svundef3_s64() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svuint8x3_t svundef3_u8() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() 
=> CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svuint16x3_t svundef3_u16() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svuint32x3_t svundef3_u32() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svuint64x3_t svundef3_u64() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svbfloat16x3_t svundef3_bf16() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svfloat16x3_t svundef3_f16() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svfloat32x3_t svundef3_f32() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svfloat64x3_t svundef3_f64() + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + + /// CreateAnUninitializedTupleOfTwoVectors : Create an uninitialized tuple of two vectors + + /// + /// svint8x2_t svundef2_s8() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svint16x2_t svundef2_s16() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svint32x2_t svundef2_s32() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svint64x2_t svundef2_s64() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svuint8x2_t svundef2_u8() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svuint16x2_t svundef2_u16() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svuint32x2_t svundef2_u32() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svuint64x2_t svundef2_u64() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svbfloat16x2_t svundef2_bf16() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svfloat16x2_t svundef2_f16() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svfloat32x2_t svundef2_f32() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svfloat64x2_t svundef2_f64() + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + + /// 
CreateAnUninitializedVector : Create an uninitialized vector + + /// + /// svint8_t svundef_s8() + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svint16_t svundef_s16() + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svint32_t svundef_s32() + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svint64_t svundef_s64() + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svuint8_t svundef_u8() + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svuint16_t svundef_u16() + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svuint32_t svundef_u32() + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svuint64_t svundef_u64() + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svbfloat16_t svundef_bf16() + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svfloat16_t svundef_f16() + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svfloat32_t svundef_f32() + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svfloat64_t svundef_f64() + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + + /// DuplicateSelectedScalarToVector : Broadcast a quadword of scalars + + /// + /// svint8_t svdupq[_n]_s8(int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7, int8_t x8, int8_t x9, int8_t x10, int8_t x11, int8_t x12, int8_t x13, int8_t x14, int8_t x15) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(sbyte x0, [ConstantExpected] byte index, sbyte x2, sbyte x3, sbyte x4, sbyte x5, sbyte x6, sbyte x7, sbyte x8, sbyte x9, sbyte x10, sbyte x11, sbyte x12, sbyte x13, sbyte x14, sbyte x15) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); + + /// + /// svint16_t svdupq[_n]_s16(int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(short x0, [ConstantExpected] byte index, short x2, short x3, short x4, short x5, short x6, short x7) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7); + + /// + /// svint32_t svdupq[_n]_s32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(int x0, [ConstantExpected] byte index, int x2, int x3) => DuplicateSelectedScalarToVector(x0, index, x2, x3); + + /// + /// svint64_t svdupq[_n]_s64(int64_t x0, int64_t x1) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(long x0, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(x0, index); + + /// + /// svuint8_t svdupq[_n]_u8(uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7, uint8_t x8, uint8_t x9, uint8_t x10, uint8_t x11, uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(byte 
x0, [ConstantExpected] byte index, byte x2, byte x3, byte x4, byte x5, byte x6, byte x7, byte x8, byte x9, byte x10, byte x11, byte x12, byte x13, byte x14, byte x15) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); + + /// + /// svbool_t svdupq[_n]_b8(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7, bool x8, bool x9, bool x10, bool x11, bool x12, bool x13, bool x14, bool x15) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, [ConstantExpected] byte index, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7, bool x8, bool x9, bool x10, bool x11, bool x12, bool x13, bool x14, bool x15) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); + + /// + /// svuint16_t svdupq[_n]_u16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(ushort x0, [ConstantExpected] byte index, ushort x2, ushort x3, ushort x4, ushort x5, ushort x6, ushort x7) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7); + + /// + /// svbool_t svdupq[_n]_b16(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, [ConstantExpected] byte index, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7); + + /// + /// svuint32_t svdupq[_n]_u32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(uint x0, [ConstantExpected] byte index, uint x2, uint x3) => DuplicateSelectedScalarToVector(x0, index, x2, x3); + + /// + /// svbool_t svdupq[_n]_b32(bool x0, bool x1, bool x2, bool x3) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, [ConstantExpected] byte index, bool x2, bool x3) => DuplicateSelectedScalarToVector(x0, index, x2, x3); + + /// + /// svuint64_t svdupq[_n]_u64(uint64_t x0, uint64_t x1) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(ulong x0, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(x0, index); + + /// + /// svbool_t svdupq[_n]_b64(bool x0, bool x1) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(x0, index); + + /// + /// svbfloat16_t svdupq[_n]_bf16(bfloat16_t x0, bfloat16_t x1, bfloat16_t x2, bfloat16_t x3, bfloat16_t x4, bfloat16_t x5, bfloat16_t x6, bfloat16_t x7) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bfloat16 x0, [ConstantExpected] byte index, bfloat16 x2, bfloat16 x3, bfloat16 x4, bfloat16 x5, bfloat16 x6, bfloat16 x7) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7); + + /// + /// svfloat16_t svdupq[_n]_f16(float16_t x0, float16_t x1, float16_t x2, float16_t x3, float16_t x4, float16_t x5, float16_t x6, float16_t x7) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(half x0, [ConstantExpected] byte index, half x2, half x3, half x4, half x5, half x6, half x7) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7); + + /// + /// svfloat32_t svdupq[_n]_f32(float32_t x0, float32_t x1, float32_t x2, float32_t x3) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(float x0, [ConstantExpected] byte index, float x2, float x3) => 
DuplicateSelectedScalarToVector(x0, index, x2, x3); + + /// + /// svfloat64_t svdupq[_n]_f64(float64_t x0, float64_t x1) + /// + public static unsafe Vector DuplicateSelectedScalarToVector(double x0, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(x0, index); + + + /// ExtractOneVectorFromATupleOfFourVectors : Extract one vector from a tuple of four vectors + + /// + /// svint8_t svget4[_s8](svint8x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svint16_t svget4[_s16](svint16x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svint32_t svget4[_s32](svint32x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svint64_t svget4[_s64](svint64x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svuint8_t svget4[_u8](svuint8x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svuint16_t svget4[_u16](svuint16x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svuint32_t svget4[_u32](svuint32x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svuint64_t svget4[_u64](svuint64x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svbfloat16_t svget4[_bf16](svbfloat16x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svfloat16_t svget4[_f16](svfloat16x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svfloat32_t svget4[_f32](svfloat32x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector 
tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svfloat64_t svget4[_f64](svfloat64x4_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + + /// ExtractOneVectorFromATupleOfThreeVectors : Extract one vector from a tuple of three vectors + + /// + /// svint8_t svget3[_s8](svint8x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svint16_t svget3[_s16](svint16x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svint32_t svget3[_s32](svint32x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svint64_t svget3[_s64](svint64x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svuint8_t svget3[_u8](svuint8x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svuint16_t svget3[_u16](svuint16x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svuint32_t svget3[_u32](svuint32x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svuint64_t svget3[_u64](svuint64x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svbfloat16_t svget3[_bf16](svbfloat16x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svfloat16_t svget3[_f16](svfloat16x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svfloat32_t svget3[_f32](svfloat32x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => 
ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svfloat64_t svget3[_f64](svfloat64x3_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + + /// ExtractOneVectorFromATupleOfTwoVectors : Extract one vector from a tuple of two vectors + + /// + /// svint8_t svget2[_s8](svint8x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svint16_t svget2[_s16](svint16x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svint32_t svget2[_s32](svint32x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svint64_t svget2[_s64](svint64x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svuint8_t svget2[_u8](svuint8x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svuint16_t svget2[_u16](svuint16x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svuint32_t svget2[_u32](svuint32x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svuint64_t svget2[_u64](svuint64x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svbfloat16_t svget2[_bf16](svbfloat16x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svfloat16_t svget2[_f16](svfloat16x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svfloat32_t svget2[_f32](svfloat32x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svfloat64_t svget2[_f64](svfloat64x2_t tuple, uint64_t imm_index) + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong 
+ + + /// ReinterpretVectorContents : Reinterpret vector contents + + /// + /// svint8_t svreinterpret_s8[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_f16](svfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_f32](svfloat32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_f64](svfloat64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_s8](svint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_s16](svint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_s32](svint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_s64](svint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_u8](svuint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_u16](svuint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_u32](svuint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_u64](svuint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_s8](svint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_s32](svint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_s64](svint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_u8](svuint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_u16](svuint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_u32](svuint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_f16](svfloat16_t op) + /// + public static unsafe Vector
ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_f32](svfloat32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_f64](svfloat64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_s16](svint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_u64](svuint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_s8](svint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_s16](svint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_s64](svint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_u8](svuint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_u16](svuint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_u32](svuint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_u64](svuint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_f16](svfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_f32](svfloat32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_f64](svfloat64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_s32](svint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_s8](svint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_s16](svint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_s32](svint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_u8](svuint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => 
ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_u16](svuint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_u32](svuint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_u64](svuint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_f16](svfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_f32](svfloat32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_f64](svfloat64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_s64](svint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_s8](svint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_s16](svint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_s32](svint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_s64](svint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_u16](svuint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_u32](svuint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_u64](svuint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_f16](svfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_f32](svfloat32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_f64](svfloat64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_u8](svuint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + 
/// svuint16_t svreinterpret_u16[_s8](svint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_s16](svint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_s32](svint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_s64](svint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_u8](svuint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_u32](svuint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_u64](svuint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_f16](svfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_f32](svfloat32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_f64](svfloat64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_u16](svuint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_s8](svint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_s16](svint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_s32](svint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_s64](svint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_u8](svuint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_u16](svuint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_u64](svuint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t 
svreinterpret_u32[_f16](svfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_f32](svfloat32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_f64](svfloat64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_u32](svuint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_s8](svint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_s16](svint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_s32](svint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_s64](svint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_u8](svuint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_u16](svuint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_u32](svuint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_f16](svfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_f32](svfloat32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_f64](svfloat64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_u64](svuint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_s8](svint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_s16](svint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_s32](svint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t 
svreinterpret_bf16[_s64](svint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_u8](svuint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_u16](svuint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_u32](svuint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_u64](svuint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_f16](svfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_f32](svfloat32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_f64](svfloat64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_s8](svint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_s16](svint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_s32](svint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_u8](svuint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_u16](svuint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_u32](svuint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_f16](svfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_s64](svint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_u64](svuint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_f32](svfloat32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_f64](svfloat64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// 
+ /// svfloat32_t svreinterpret_f32[_s8](svint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_s16](svint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_s32](svint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_s64](svint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_u8](svuint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_u16](svuint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_u32](svuint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_u64](svuint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_f16](svfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_f32](svfloat32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_f64](svfloat64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_bf16](svbfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_f16](svfloat16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_f32](svfloat32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_f64](svfloat64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_s8](svint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_s16](svint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_s32](svint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_s64](svint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + 
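+ // Editorial note: ReinterpretVectorContents is a bitwise view change, not a numeric
+ // conversion: the lane bits are left untouched and only the element type changes. A
+ // software model for the float->int overload, assuming it behaves like the existing
+ // System.Numerics reinterpret helpers (the name "ReinterpretSingleAsInt32Model" is hypothetical):
+ internal static Vector<int> ReinterpretSingleAsInt32Model(Vector<float> value)
+     => Vector.AsVectorInt32(value); // same bits, reinterpreted as Int32 lanes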
/// + /// svfloat64_t svreinterpret_f64[_u8](svuint8_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_u16](svuint16_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_u32](svuint32_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_u64](svuint64_t op) + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + } +} + diff --git a/sve_api/out_cs_api/SveSha3.PlatformNotSupported.cs b/sve_api/out_cs_api/SveSha3.PlatformNotSupported.cs new file mode 100644 index 0000000000000..e1ff09754213c --- /dev/null +++ b/sve_api/out_cs_api/SveSha3.PlatformNotSupported.cs @@ -0,0 +1,44 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveSha3 : AdvSimd + { + internal SveSha3() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// BitwiseRotateLeftBy1AndXor : Bitwise rotate left by 1 and exclusive OR + + /// + /// svint64_t svrax1[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector BitwiseRotateLeftBy1AndXor(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + /// + /// svuint64_t svrax1[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector BitwiseRotateLeftBy1AndXor(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + } +} + diff --git a/sve_api/out_cs_api/SveSha3.System.Runtime.Intrinsics.cs b/sve_api/out_cs_api/SveSha3.System.Runtime.Intrinsics.cs new file mode 100644 index 0000000000000..15803b5a05eeb --- /dev/null +++ b/sve_api/out_cs_api/SveSha3.System.Runtime.Intrinsics.cs @@ -0,0 +1,3 @@ + public static System.Numerics.Vector BitwiseRotateLeftBy1AndXor(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector BitwiseRotateLeftBy1AndXor(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + diff --git a/sve_api/out_cs_api/SveSha3.cs b/sve_api/out_cs_api/SveSha3.cs new file mode 100644 index 0000000000000..75aa55b3c84b9 --- /dev/null +++ b/sve_api/out_cs_api/SveSha3.cs @@ -0,0 +1,44 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
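+ // Editorial note (assumption, based on the RAX1 description below): per 64-bit lane,
+ // BitwiseRotateLeftBy1AndXor(left, right) computes
+ //     result[i] = left[i] ^ System.Numerics.BitOperations.RotateLeft(right[i], 1)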
+ +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveSha3 : AdvSimd + { + internal SveSha3() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// BitwiseRotateLeftBy1AndXor : Bitwise rotate left by 1 and exclusive OR + + /// + /// svint64_t svrax1[_s64](svint64_t op1, svint64_t op2) + /// + public static unsafe Vector BitwiseRotateLeftBy1AndXor(Vector left, Vector right) => BitwiseRotateLeftBy1AndXor(left, right); + + /// + /// svuint64_t svrax1[_u64](svuint64_t op1, svuint64_t op2) + /// + public static unsafe Vector BitwiseRotateLeftBy1AndXor(Vector left, Vector right) => BitwiseRotateLeftBy1AndXor(left, right); + + } +} + diff --git a/sve_api/out_cs_api/SveSm4.PlatformNotSupported.cs b/sve_api/out_cs_api/SveSm4.PlatformNotSupported.cs new file mode 100644 index 0000000000000..adb17a159f0ea --- /dev/null +++ b/sve_api/out_cs_api/SveSm4.PlatformNotSupported.cs @@ -0,0 +1,47 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveSm4 : AdvSimd + { + internal SveSm4() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// Sm4EncryptionAndDecryption : SM4 encryption and decryption + + /// + /// svuint32_t svsm4e[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector Sm4EncryptionAndDecryption(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + + /// Sm4KeyUpdates : SM4 key updates + + /// + /// svuint32_t svsm4ekey[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector Sm4KeyUpdates(Vector left, Vector right) { throw new PlatformNotSupportedException(); } + + } +} + diff --git a/sve_api/out_cs_api/SveSm4.System.Runtime.Intrinsics.cs b/sve_api/out_cs_api/SveSm4.System.Runtime.Intrinsics.cs new file mode 100644 index 0000000000000..dbb92dda87c7b --- /dev/null +++ b/sve_api/out_cs_api/SveSm4.System.Runtime.Intrinsics.cs @@ -0,0 +1,3 @@ + public static System.Numerics.Vector Sm4EncryptionAndDecryption(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + public static System.Numerics.Vector Sm4KeyUpdates(System.Numerics.Vector left, System.Numerics.Vector right) { throw null; } + diff --git a/sve_api/out_cs_api/SveSm4.cs b/sve_api/out_cs_api/SveSm4.cs new file mode 100644 index 0000000000000..0ffb1c7ecd82e --- /dev/null +++ b/sve_api/out_cs_api/SveSm4.cs @@ -0,0 +1,47 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveSm4 : AdvSimd + { + internal SveSm4() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// Sm4EncryptionAndDecryption : SM4 encryption and decryption + + /// + /// svuint32_t svsm4e[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector Sm4EncryptionAndDecryption(Vector left, Vector right) => Sm4EncryptionAndDecryption(left, right); + + + /// Sm4KeyUpdates : SM4 key updates + + /// + /// svuint32_t svsm4ekey[_u32](svuint32_t op1, svuint32_t op2) + /// + public static unsafe Vector Sm4KeyUpdates(Vector left, Vector right) => Sm4KeyUpdates(left, right); + + } +} + diff --git a/sve_api/out_helper_api/Sha3.cs b/sve_api/out_helper_api/Sha3.cs new file mode 100644 index 0000000000000..d8d9fa67fbd24 --- /dev/null +++ b/sve_api/out_helper_api/Sha3.cs @@ -0,0 +1,230 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class Sha3 : AdvSimd + { + internal Sha3() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// BitwiseClearXor : Bit Clear and Exclusive OR performs a bitwise AND of the 128-bit vector in a source SIMD&FP register and the complement of the vector in another source SIMD&FP register, then performs a bitwise exclusive OR of the resulting vector and the vector in a third source SIMD&FP register, and writes the result to the destination SIMD&FP register. 
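+ // Editorial note: a scalar model of the BCAX semantics described above, assuming the
+ // operand order follows the managed signature (xor, value, mask); the helper name
+ // "BitwiseClearXorModel" is hypothetical:
+ internal static ulong BitwiseClearXorModel(ulong xor, ulong value, ulong mask)
+     => xor ^ (value & ~mask); // Xor (EOR3) and XorRotateRight (XAR) below follow a ^ b ^ c and ror64(a ^ b, imm6)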
+ + /// + /// int8x16_t vbcaxq_s8(int8x16_t a, int8x16_t b, int8x16_t c) + /// BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + /// + /// int16x8_t vbcaxq_s16(int16x8_t a, int16x8_t b, int16x8_t c) + /// BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + /// + /// int32x4_t vbcaxq_s32(int32x4_t a, int32x4_t b, int32x4_t c) + /// BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + /// + /// int64x2_t vbcaxq_s64(int64x2_t a, int64x2_t b, int64x2_t c) + /// BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + /// + /// uint8x16_t vbcaxq_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) + /// BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + /// + /// uint16x8_t vbcaxq_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) + /// BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + /// + /// uint32x4_t vbcaxq_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) + /// BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + /// + /// uint64x2_t vbcaxq_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) + /// BCAX Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 BitwiseClearXor(Vector128 xor, Vector128 value, Vector128 mask) => BitwiseClearXor(xor, value, mask); + + + /// BitwiseRotateLeftBy1AndXor : Rotate and Exclusive OR rotates each 64-bit element of the 128-bit vector in 
a source SIMD&FP register left by 1, performs a bitwise exclusive OR of the resulting 128-bit vector and the vector in another source SIMD&FP register, and writes the result to the destination SIMD&FP register. + + /// + /// uint64x2_t vrax1q_u64(uint64x2_t a, uint64x2_t b) + /// RAX1 Vd.2D,Vn.2D,Vm.2D + /// + /// codegenarm64test: + /// IF_SVE_GJ_3A RAX1 .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_rax1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 BitwiseRotateLeftBy1AndXor(Vector128 a, Vector128 b) => BitwiseRotateLeftBy1AndXor(a, b); + + + /// Xor : Three-way Exclusive OR performs a three-way exclusive OR of the values in the three source SIMD&FP registers, and writes the result to the destination SIMD&FP register. + + /// + /// int8x16_t veor3q_s8(int8x16_t a, int8x16_t b, int8x16_t c) + /// EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + /// + /// int16x8_t veor3q_s16(int16x8_t a, int16x8_t b, int16x8_t c) + /// EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + /// + /// int32x4_t veor3q_s32(int32x4_t a, int32x4_t b, int32x4_t c) + /// EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + /// + /// int64x2_t veor3q_s64(int64x2_t a, int64x2_t b, int64x2_t c) + /// EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + /// + /// uint8x16_t veor3q_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) + /// EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + /// + /// uint16x8_t veor3q_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) + /// EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + /// + /// uint32x4_t veor3q_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) + /// EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, 
REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + /// + /// uint64x2_t veor3q_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) + /// EOR3 Vd.16B,Vn.16B,Vm.16B,Va.16B + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 Xor(Vector128 value1, Vector128 value2, Vector128 value3) => Xor(value1, value2, value3); + + + /// XorRotateRight : Exclusive OR and Rotate performs a bitwise exclusive OR of the 128-bit vectors in the two source SIMD&FP registers, rotates each 64-bit element of the resulting 128-bit vector right by the value specified by a 6-bit immediate value, and writes the result to the destination SIMD&FP register. + + /// + /// uint64x2_t vxarq_u64(uint64x2_t a, uint64x2_t b, const int imm6) + /// XAR Vd.2D,Vn.2D,Vm.2D,imm6 + /// + /// codegenarm64test: + /// IF_SVE_AW_2A XAR ., ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V2, REG_V3, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V4, REG_V5, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V6, REG_V7, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V8, REG_V9, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V10, REG_V11, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V12, REG_V13, 4, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V14, REG_V15, 64, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector128 XorRotateRight(Vector128 left, Vector128 right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + } +} + diff --git a/sve_api/out_helper_api/Sm4.cs b/sve_api/out_helper_api/Sm4.cs new file mode 100644 index 0000000000000..0c5ccfa7afe4d --- /dev/null +++ b/sve_api/out_helper_api/Sm4.cs @@ -0,0 +1,57 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class Sm4 : AdvSimd + { + internal Sm4() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// Sm4EncryptionAndDecryption : SM4 Encode takes input data as a 128-bit vector from the first source SIMD&FP register, and four iterations of the round key held as the elements of the 128-bit vector in the second source SIMD&FP register. It encrypts the data by four rounds, in accordance with the SM4 standard, returning the 128-bit result to the destination SIMD&FP register. 
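+ // Editorial note: a hedged usage sketch, not generator output. Per the description
+ // above, SM4E applies four SM4 rounds per call, so a full 32-round block encryption
+ // would chain eight such calls; "data" and "roundKeys" are hypothetical parameters.
+ internal static Vector128<uint> Sm4FourRoundsExample(Vector128<uint> data, Vector128<uint> roundKeys)
+     => IsSupported ? Sm4EncryptionAndDecryption(data, roundKeys) : throw new PlatformNotSupportedException();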
+ + /// + /// uint32x4_t vsm4eq_u32(uint32x4_t a, uint32x4_t b) + /// SM4E Vd.4S,Vn.4S + /// + /// codegenarm64test: + /// IF_SVE_GK_2A SM4E .S, + /// theEmitter->emitIns_R_R(INS_sve_sm4e, EA_SCALABLE, REG_V3, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector128 Sm4EncryptionAndDecryption(Vector128 a, Vector128 b) => Sm4EncryptionAndDecryption(a, b); + + + /// Sm4KeyUpdates : SM4 Key takes an input as a 128-bit vector from the first source SIMD&FP register and a 128-bit constant from the second SIMD&FP register. It derives four iterations of the output key, in accordance with the SM4 standard, returning the 128-bit result to the destination SIMD&FP register. + + /// + /// uint32x4_t vsm4ekeyq_u32(uint32x4_t a, uint32x4_t b) + /// SM4EKEY Vd.4S,Vn.4S,Vm.4S + /// + /// codegenarm64test: + /// IF_SVE_GJ_3A SM4EKEY .S, .S, .S + /// theEmitter->emitIns_R_R_R(INS_sve_sm4ekey, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector128 Sm4KeyUpdates(Vector128 a, Vector128 b) => Sm4KeyUpdates(a, b); + + } +} + diff --git a/sve_api/out_helper_api/Sve.cs b/sve_api/out_helper_api/Sve.cs new file mode 100644 index 0000000000000..be5aff498f4ec --- /dev/null +++ b/sve_api/out_helper_api/Sve.cs @@ -0,0 +1,34511 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class Sve : AdvSimd + { + internal Sve() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// Abs : Absolute value + + /// + /// svint8_t svabs[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) + /// ABS Ztied.B, Pg/M, Zop.B + /// MOVPRFX Zresult, Zinactive; ABS Zresult.B, Pg/M, Zop.B + /// svint8_t svabs[_s8]_x(svbool_t pg, svint8_t op) + /// ABS Ztied.B, Pg/M, Ztied.B + /// MOVPRFX Zresult, Zop; ABS Zresult.B, Pg/M, Zop.B + /// svint8_t svabs[_s8]_z(svbool_t pg, svint8_t op) + /// MOVPRFX Zresult.B, Pg/Z, Zop.B; ABS Zresult.B, Pg/M, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A ABS ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_abs, EA_SCALABLE, REG_V24, REG_P7, REG_V7, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Abs(Vector value) => Abs(value); + + /// + /// svint16_t svabs[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// ABS Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; ABS Zresult.H, Pg/M, Zop.H + /// svint16_t svabs[_s16]_x(svbool_t pg, svint16_t op) + /// ABS Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; ABS Zresult.H, Pg/M, Zop.H + /// svint16_t svabs[_s16]_z(svbool_t pg, svint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; ABS Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A ABS ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_abs, EA_SCALABLE, REG_V24, REG_P7, REG_V7, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Abs(Vector value) => Abs(value); + + /// + /// svint32_t svabs[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// ABS Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; ABS Zresult.S, Pg/M, Zop.S + /// svint32_t svabs[_s32]_x(svbool_t pg, svint32_t op) + /// ABS Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; ABS Zresult.S, Pg/M, Zop.S + /// svint32_t svabs[_s32]_z(svbool_t pg, svint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; ABS Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A ABS ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_abs, EA_SCALABLE, REG_V24, REG_P7, REG_V7, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Abs(Vector value) => Abs(value); + + /// + /// svint64_t svabs[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// ABS Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; ABS Zresult.D, Pg/M, Zop.D + /// svint64_t svabs[_s64]_x(svbool_t pg, svint64_t op) + /// ABS Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; ABS Zresult.D, Pg/M, Zop.D + /// svint64_t svabs[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; ABS Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A ABS ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_abs, EA_SCALABLE, REG_V24, REG_P7, REG_V7, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Abs(Vector value) => Abs(value); + + /// + /// svfloat32_t svabs[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// FABS Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; FABS Zresult.S, Pg/M, Zop.S + /// svfloat32_t svabs[_f32]_x(svbool_t pg, svfloat32_t op) + /// FABS Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; FABS Zresult.S, Pg/M, Zop.S + /// svfloat32_t svabs[_f32]_z(svbool_t pg, svfloat32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FABS Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AP_3A FABS ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_fabs, EA_SCALABLE, REG_V27, REG_P4, REG_V4, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Abs(Vector value) => Abs(value); + + /// + /// svfloat64_t svabs[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// FABS Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; FABS Zresult.D, Pg/M, Zop.D + /// svfloat64_t svabs[_f64]_x(svbool_t pg, svfloat64_t op) + /// FABS Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; FABS Zresult.D, Pg/M, Zop.D + /// svfloat64_t svabs[_f64]_z(svbool_t pg, svfloat64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FABS Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AP_3A FABS ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_fabs, EA_SCALABLE, REG_V27, REG_P4, REG_V4, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Abs(Vector value) => Abs(value); + + + /// AbsoluteCompareGreaterThan : Absolute compare greater than + + /// + /// svbool_t svacgt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FACGT Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FACGT ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_facgt, EA_SCALABLE, REG_P15, REG_P1, REG_V20, REG_V21, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteCompareGreaterThan(Vector left, Vector right) => AbsoluteCompareGreaterThan(left, right); + + /// + /// svbool_t svacgt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FACGT Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FACGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_facgt, EA_SCALABLE, REG_P15, REG_P1, REG_V20, REG_V21, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteCompareGreaterThan(Vector left, Vector right) => AbsoluteCompareGreaterThan(left, right); + + + /// AbsoluteCompareGreaterThanOrEqual : Absolute compare greater than or equal to + + /// + /// svbool_t svacge[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FACGE Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FACGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_facge, EA_SCALABLE, REG_P0, REG_P0, REG_V10, REG_V31, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteCompareGreaterThanOrEqual(Vector left, Vector right) => AbsoluteCompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svacge[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FACGE Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FACGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_facge, EA_SCALABLE, REG_P0, REG_P0, REG_V10, REG_V31, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteCompareGreaterThanOrEqual(Vector left, Vector right) => AbsoluteCompareGreaterThanOrEqual(left, right); + + + /// AbsoluteCompareLessThan : Absolute compare less than + + /// + /// svbool_t svaclt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FACGT Presult.S, Pg/Z, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FACGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_facgt, EA_SCALABLE, REG_P15, REG_P1, REG_V20, REG_V21, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteCompareLessThan(Vector left, Vector right) => AbsoluteCompareLessThan(left, right); + + /// + /// svbool_t svaclt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FACGT Presult.D, Pg/Z, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FACGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_facgt, EA_SCALABLE, REG_P15, REG_P1, REG_V20, REG_V21, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteCompareLessThan(Vector left, Vector right) => AbsoluteCompareLessThan(left, right); + + + /// AbsoluteCompareLessThanOrEqual : Absolute compare less than or equal to + + /// + /// svbool_t svacle[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FACGE Presult.S, Pg/Z, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FACGE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_facge, EA_SCALABLE, REG_P0, REG_P0, REG_V10, REG_V31, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteCompareLessThanOrEqual(Vector left, Vector right) => AbsoluteCompareLessThanOrEqual(left, right); + + /// + /// svbool_t svacle[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FACGE Presult.D, Pg/Z, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FACGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_facge, EA_SCALABLE, REG_P0, REG_P0, REG_V10, REG_V31, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteCompareLessThanOrEqual(Vector left, Vector right) => AbsoluteCompareLessThanOrEqual(left, right); + + + /// AbsoluteDifference : Absolute difference + + /// + /// svint8_t svabd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// SABD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; SABD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svabd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// SABD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// SABD Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; SABD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svabd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; SABD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; SABD Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AD_3A SABD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabd, EA_SCALABLE, REG_V5, REG_P2, REG_V6, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) => AbsoluteDifference(left, right); + + /// + /// svint16_t svabd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// SABD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SABD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svabd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// SABD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// SABD Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; SABD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svabd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; SABD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; SABD Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AD_3A SABD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabd, EA_SCALABLE, REG_V5, REG_P2, REG_V6, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) => AbsoluteDifference(left, right); + + /// + /// svint32_t svabd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// SABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SABD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svabd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// SABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// SABD Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; SABD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svabd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; SABD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; SABD Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AD_3A SABD ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sabd, EA_SCALABLE, REG_V5, REG_P2, REG_V6, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) => AbsoluteDifference(left, right); + + /// + /// svint64_t svabd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// SABD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SABD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svabd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// SABD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// SABD Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; SABD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svabd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SABD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; SABD Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AD_3A SABD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabd, EA_SCALABLE, REG_V5, REG_P2, REG_V6, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) => AbsoluteDifference(left, right); + + /// + /// svuint8_t svabd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UABD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; UABD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svabd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UABD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// UABD Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; UABD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svabd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; UABD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; UABD Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AD_3A UABD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uabd, EA_SCALABLE, REG_V23, REG_P3, REG_V9, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) => AbsoluteDifference(left, right); + + /// + /// svuint16_t svabd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UABD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; UABD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svabd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UABD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// UABD Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; UABD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svabd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; UABD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; UABD Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AD_3A UABD ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uabd, EA_SCALABLE, REG_V23, REG_P3, REG_V9, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) => AbsoluteDifference(left, right); + + /// + /// svuint32_t svabd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UABD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svabd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// UABD Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; UABD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svabd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; UABD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; UABD Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AD_3A UABD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uabd, EA_SCALABLE, REG_V23, REG_P3, REG_V9, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) => AbsoluteDifference(left, right); + + /// + /// svuint64_t svabd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UABD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UABD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svabd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UABD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// UABD Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; UABD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svabd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; UABD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; UABD Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AD_3A UABD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uabd, EA_SCALABLE, REG_V23, REG_P3, REG_V9, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) => AbsoluteDifference(left, right); + + /// + /// svfloat32_t svabd[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FABD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svabd[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FABD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// FABD Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; FABD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svabd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FABD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; FABD Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FABD ., /M, ., . 
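+ /// Usage sketch (illustrative only; inputs are hypothetical Vector<float> values): + /// if (Sve.IsSupported) { Vector<float> d = Sve.AbsoluteDifference(a, b); } // per-lane |a - b|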
+ /// theEmitter->emitIns_R_R_R(INS_sve_fabd, EA_SCALABLE, REG_V24, REG_P3, REG_V11, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) => AbsoluteDifference(left, right); + + /// + /// svfloat64_t svabd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FABD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FABD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svabd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FABD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// FABD Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; FABD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svabd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FABD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; FABD Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FABD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fabd, EA_SCALABLE, REG_V24, REG_P3, REG_V11, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) => AbsoluteDifference(left, right); + + + /// Add : Add + + /// + /// svint8_t svadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// ADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; ADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// ADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// ADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// ADD Zresult.B, Zop1.B, Zop2.B + /// svint8_t svadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; ADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; ADD Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AB_3A ADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V5, REG_P6, REG_V7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AT_3A ADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V0, REG_V0, REG_V0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A ADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_add, EA_SCALABLE, REG_V0, 0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Add(Vector left, Vector right) => Add(left, right); + + /// + /// svint16_t svadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// ADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; ADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// ADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// ADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// ADD Zresult.H, Zop1.H, Zop2.H + /// svint16_t svadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; ADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; ADD Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AB_3A ADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V5, REG_P6, REG_V7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AT_3A ADD ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V0, REG_V0, REG_V0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A ADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_add, EA_SCALABLE, REG_V0, 0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Add(Vector left, Vector right) => Add(left, right); + + /// + /// svint32_t svadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// ADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; ADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// ADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// ADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// ADD Zresult.S, Zop1.S, Zop2.S + /// svint32_t svadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; ADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; ADD Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AB_3A ADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V5, REG_P6, REG_V7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AT_3A ADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V0, REG_V0, REG_V0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A ADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_add, EA_SCALABLE, REG_V0, 0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Add(Vector left, Vector right) => Add(left, right); + + /// + /// svint64_t svadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// ADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; ADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// ADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// ADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// ADD Zresult.D, Zop1.D, Zop2.D + /// svint64_t svadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; ADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; ADD Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AB_3A ADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V5, REG_P6, REG_V7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AT_3A ADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V0, REG_V0, REG_V0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A ADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_add, EA_SCALABLE, REG_V0, 0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Add(Vector left, Vector right) => Add(left, right); + + /// + /// svuint8_t svadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// ADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; ADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// ADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// ADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// ADD Zresult.B, Zop1.B, Zop2.B + /// svuint8_t svadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; ADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; ADD Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AB_3A ADD ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V5, REG_P6, REG_V7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AT_3A ADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V0, REG_V0, REG_V0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A ADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_add, EA_SCALABLE, REG_V0, 0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Add(Vector left, Vector right) => Add(left, right); + + /// + /// svuint16_t svadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// ADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; ADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// ADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// ADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// ADD Zresult.H, Zop1.H, Zop2.H + /// svuint16_t svadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; ADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; ADD Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AB_3A ADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V5, REG_P6, REG_V7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AT_3A ADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V0, REG_V0, REG_V0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A ADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_add, EA_SCALABLE, REG_V0, 0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Add(Vector left, Vector right) => Add(left, right); + + /// + /// svuint32_t svadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// ADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; ADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// ADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// ADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// ADD Zresult.S, Zop1.S, Zop2.S + /// svuint32_t svadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; ADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; ADD Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AB_3A ADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V5, REG_P6, REG_V7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AT_3A ADD ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V0, REG_V0, REG_V0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A ADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_add, EA_SCALABLE, REG_V0, 0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Add(Vector left, Vector right) => Add(left, right); + + /// + /// svuint64_t svadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// ADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; ADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// ADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// ADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// ADD Zresult.D, Zop1.D, Zop2.D + /// svuint64_t svadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; ADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; ADD Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AB_3A ADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V5, REG_P6, REG_V7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AT_3A ADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_add, EA_SCALABLE, REG_V0, REG_V0, REG_V0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A ADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_add, EA_SCALABLE, REG_V0, 0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Add(Vector left, Vector right) => Add(left, right); + + /// + /// svfloat32_t svadd[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svadd[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// FADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// FADD Zresult.S, Zop1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svadd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; FADD Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fadd, EA_SCALABLE, REG_V25, REG_P2, REG_V10, INS_OPTS_SCALABLE_S); + /// IF_SVE_HK_3A FADD ., ., . 
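+ /// Usage sketch (illustrative only; 'a' and 'b' are hypothetical Vector<float> values): + /// if (Sve.IsSupported) { Vector<float> sum = Sve.Add(a, b); } // per-lane a + b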
+ /// theEmitter->emitIns_R_R_R(INS_sve_fadd, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_HM_2A FADD ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fadd, EA_SCALABLE, REG_V0, REG_P0, 0.5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_F(INS_sve_fadd, EA_SCALABLE, REG_V0, REG_P1, 1.0, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Add(Vector left, Vector right) => Add(left, right); + + /// + /// svfloat64_t svadd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svadd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// FADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// FADD Zresult.D, Zop1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svadd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; FADD Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fadd, EA_SCALABLE, REG_V25, REG_P2, REG_V10, INS_OPTS_SCALABLE_S); + /// IF_SVE_HK_3A FADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fadd, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_HM_2A FADD ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fadd, EA_SCALABLE, REG_V0, REG_P0, 0.5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_F(INS_sve_fadd, EA_SCALABLE, REG_V0, REG_P1, 1.0, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Add(Vector left, Vector right) => Add(left, right); + + + /// AddAcross : Add reduction + + /// + /// int64_t svaddv[_s8](svbool_t pg, svint8_t op) + /// SADDV Dresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AI_3A SADDV
<Dd>, <Pg>, <Zn>.<T> + /// theEmitter->emitIns_R_R_R(INS_sve_saddv, EA_1BYTE, REG_V1, REG_P4, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_saddv, EA_2BYTE, REG_V2, REG_P5, REG_V3, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + ///
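+ /// Usage sketch (illustrative only; 'bytes' is a hypothetical Vector<sbyte>): + /// if (Sve.IsSupported) { long total = Sve.AddAcross(bytes)[0]; } // widening sum of every lane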
+ public static unsafe Vector AddAcross(Vector value) => AddAcross(value); + + /// + /// int64_t svaddv[_s16](svbool_t pg, svint16_t op) + /// SADDV Dresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AI_3A SADDV
<Dd>, <Pg>, <Zn>.<T> + /// theEmitter->emitIns_R_R_R(INS_sve_saddv, EA_1BYTE, REG_V1, REG_P4, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_saddv, EA_2BYTE, REG_V2, REG_P5, REG_V3, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + ///
+ public static unsafe Vector AddAcross(Vector value) => AddAcross(value); + + /// + /// int64_t svaddv[_s32](svbool_t pg, svint32_t op) + /// SADDV Dresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AI_3A SADDV
<Dd>, <Pg>, <Zn>.<T> + /// theEmitter->emitIns_R_R_R(INS_sve_saddv, EA_1BYTE, REG_V1, REG_P4, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_saddv, EA_2BYTE, REG_V2, REG_P5, REG_V3, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + ///
+ public static unsafe Vector AddAcross(Vector value) => AddAcross(value); + + /// + /// int64_t svaddv[_s64](svbool_t pg, svint64_t op) + /// UADDV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AI_3A UADDV
<Dd>, <Pg>, <Zn>.<T> + /// theEmitter->emitIns_R_R_R(INS_sve_uaddv, EA_4BYTE, REG_V3, REG_P6, REG_V4, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + ///
+ public static unsafe Vector AddAcross(Vector value) => AddAcross(value); + + /// + /// uint64_t svaddv[_u8](svbool_t pg, svuint8_t op) + /// UADDV Dresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AI_3A UADDV
<Dd>, <Pg>, <Zn>.<T> + /// theEmitter->emitIns_R_R_R(INS_sve_uaddv, EA_4BYTE, REG_V3, REG_P6, REG_V4, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + ///
+ public static unsafe Vector AddAcross(Vector value) => AddAcross(value); + + /// + /// uint64_t svaddv[_u16](svbool_t pg, svuint16_t op) + /// UADDV Dresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AI_3A UADDV
<Dd>, <Pg>, <Zn>.<T> + /// theEmitter->emitIns_R_R_R(INS_sve_uaddv, EA_4BYTE, REG_V3, REG_P6, REG_V4, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + ///
+ public static unsafe Vector AddAcross(Vector value) => AddAcross(value); + + /// + /// uint64_t svaddv[_u32](svbool_t pg, svuint32_t op) + /// UADDV Dresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AI_3A UADDV
<Dd>, <Pg>, <Zn>.<T> + /// theEmitter->emitIns_R_R_R(INS_sve_uaddv, EA_4BYTE, REG_V3, REG_P6, REG_V4, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + ///
+ public static unsafe Vector AddAcross(Vector value) => AddAcross(value); + + /// + /// uint64_t svaddv[_u64](svbool_t pg, svuint64_t op) + /// UADDV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AI_3A UADDV
<Dd>, <Pg>, <Zn>.<T> + /// theEmitter->emitIns_R_R_R(INS_sve_uaddv, EA_4BYTE, REG_V3, REG_P6, REG_V4, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + ///
+ public static unsafe Vector AddAcross(Vector value) => AddAcross(value); + + /// + /// float32_t svaddv[_f32](svbool_t pg, svfloat32_t op) + /// FADDV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FADDV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_faddv, EA_2BYTE, REG_V21, REG_P7, REG_V7, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddAcross(Vector value) => AddAcross(value); + + /// + /// float64_t svaddv[_f64](svbool_t pg, svfloat64_t op) + /// FADDV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FADDV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_faddv, EA_2BYTE, REG_V21, REG_P7, REG_V7, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddAcross(Vector value) => AddAcross(value); + + + /// AddRotateComplex : Complex add with rotate + + /// + /// svfloat32_t svcadd[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, uint64_t imm_rotation) + /// FCADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S, #imm_rotation + /// MOVPRFX Zresult, Zop1; FCADD Zresult.S, Pg/M, Zresult.S, Zop2.S, #imm_rotation + /// svfloat32_t svcadd[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, uint64_t imm_rotation) + /// FCADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S, #imm_rotation + /// MOVPRFX Zresult, Zop1; FCADD Zresult.S, Pg/M, Zresult.S, Zop2.S, #imm_rotation + /// svfloat32_t svcadd[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, uint64_t imm_rotation) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FCADD Zresult.S, Pg/M, Zresult.S, Zop2.S, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_GP_3A FCADD ., /M, ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_fcadd, EA_SCALABLE, REG_V0, REG_P1, REG_V2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fcadd, EA_SCALABLE, REG_V0, REG_P1, REG_V2, 270, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fcadd, EA_SCALABLE, REG_V0, REG_P1, REG_V2, 270, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fcadd, EA_SCALABLE, REG_V0, REG_P1, REG_V2, 270, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + /// + /// svfloat64_t svcadd[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation) + /// FCADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D, #imm_rotation + /// MOVPRFX Zresult, Zop1; FCADD Zresult.D, Pg/M, Zresult.D, Zop2.D, #imm_rotation + /// svfloat64_t svcadd[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation) + /// FCADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D, #imm_rotation + /// MOVPRFX Zresult, Zop1; FCADD Zresult.D, Pg/M, Zresult.D, Zop2.D, #imm_rotation + /// svfloat64_t svcadd[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, uint64_t imm_rotation) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FCADD Zresult.D, Pg/M, Zresult.D, Zop2.D, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_GP_3A FCADD ., /M, ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_fcadd, EA_SCALABLE, REG_V0, REG_P1, REG_V2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fcadd, EA_SCALABLE, REG_V0, REG_P1, REG_V2, 270, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fcadd, EA_SCALABLE, REG_V0, REG_P1, REG_V2, 270, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fcadd, EA_SCALABLE, REG_V0, REG_P1, REG_V2, 270, INS_OPTS_SCALABLE_D); + /// + /// 
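Usage sketch (illustrative only; the rotation argument must be 90 or 270): + /// if (Sve.IsSupported) { Vector<double> r = Sve.AddRotateComplex(a, b, 90); } + /// + /// 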
Embedded arg1 mask predicate + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + + /// AddSaturate : Saturating add + + /// + /// svint8_t svqadd[_s8](svint8_t op1, svint8_t op2) + /// SQADD Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_ET_3A SQADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V28, REG_P1, REG_V23, INS_OPTS_SCALABLE_B); + /// IF_SVE_AT_3A SQADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V3, REG_V31, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SQADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sqadd, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svint16_t svqadd[_s16](svint16_t op1, svint16_t op2) + /// SQADD Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_ET_3A SQADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V28, REG_P1, REG_V23, INS_OPTS_SCALABLE_B); + /// IF_SVE_AT_3A SQADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V3, REG_V31, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SQADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sqadd, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svint32_t svqadd[_s32](svint32_t op1, svint32_t op2) + /// SQADD Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_ET_3A SQADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V28, REG_P1, REG_V23, INS_OPTS_SCALABLE_B); + /// IF_SVE_AT_3A SQADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V3, REG_V31, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SQADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sqadd, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svint64_t svqadd[_s64](svint64_t op1, svint64_t op2) + /// SQADD Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_ET_3A SQADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V28, REG_P1, REG_V23, INS_OPTS_SCALABLE_B); + /// IF_SVE_AT_3A SQADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V3, REG_V31, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SQADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sqadd, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svuint8_t svqadd[_u8](svuint8_t op1, svuint8_t op2) + /// UQADD Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V0, REG_P3, REG_V27, INS_OPTS_SCALABLE_S); + /// IF_SVE_AT_3A UQADD ., ., . 
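+ /// Usage sketch (illustrative only): + /// if (Sve.IsSupported) { Vector<byte> s = Sve.AddSaturate(a, b); } // per-lane a + b, clamped at byte.MaxValue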
+ /// theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V23, REG_V28, REG_V29, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A UQADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_uqadd, EA_SCALABLE, REG_V5, 5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svuint16_t svqadd[_u16](svuint16_t op1, svuint16_t op2) + /// UQADD Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V0, REG_P3, REG_V27, INS_OPTS_SCALABLE_S); + /// IF_SVE_AT_3A UQADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V23, REG_V28, REG_V29, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A UQADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_uqadd, EA_SCALABLE, REG_V5, 5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svuint32_t svqadd[_u32](svuint32_t op1, svuint32_t op2) + /// UQADD Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V0, REG_P3, REG_V27, INS_OPTS_SCALABLE_S); + /// IF_SVE_AT_3A UQADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V23, REG_V28, REG_V29, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A UQADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_uqadd, EA_SCALABLE, REG_V5, 5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svuint64_t svqadd[_u64](svuint64_t op1, svuint64_t op2) + /// UQADD Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V0, REG_P3, REG_V27, INS_OPTS_SCALABLE_S); + /// IF_SVE_AT_3A UQADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V23, REG_V28, REG_V29, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A UQADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_uqadd, EA_SCALABLE, REG_V5, 5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + + /// AddSequentialAcross : Add reduction (strictly-ordered) + + /// + /// float32_t svadda[_f32](svbool_t pg, float32_t initial, svfloat32_t op) + /// FADDA Stied, Pg, Stied, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_HJ_3A FADDA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_fadda, EA_2BYTE, REG_V21, REG_P6, REG_V14, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_fadda, EA_4BYTE, REG_V22, REG_P5, REG_V13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_fadda, EA_8BYTE, REG_V23, REG_P4, REG_V12, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddSequentialAcross(Vector initial, Vector value) => AddSequentialAcross(initial, value); + + /// + /// float64_t svadda[_f64](svbool_t pg, float64_t initial, svfloat64_t op) + /// FADDA Dtied, Pg, Dtied, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_HJ_3A FADDA , , , . 
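+ /// Usage sketch (illustrative only; strict ordering makes the result match a scalar left-to-right sum): + /// if (Sve.IsSupported) { Vector<double> acc = Sve.AddSequentialAcross(init, values); }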
+ /// theEmitter->emitIns_R_R_R(INS_sve_fadda, EA_2BYTE, REG_V21, REG_P6, REG_V14, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_fadda, EA_4BYTE, REG_V22, REG_P5, REG_V13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_fadda, EA_8BYTE, REG_V23, REG_P4, REG_V12, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddSequentialAcross(Vector initial, Vector value) => AddSequentialAcross(initial, value); + + + /// And : Bitwise AND + + /// + /// svint8_t svand[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// AND Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; AND Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svand[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// AND Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// AND Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// AND Zresult.D, Zop1.D, Zop2.D + /// svint8_t svand[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; AND Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; AND Zresult.B, Pg/M, Zresult.B, Zop1.B + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// AND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A AND ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_P1, REG_V2, INS_OPTS_SCALABLE_B); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_and, EA_SCALABLE, REG_P0, REG_P3, REG_P14, REG_P15, INS_OPTS_SCALABLE_B); /* AND .B, /Z, .B, .B */ + /// IF_SVE_AU_3A AND .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector And(Vector left, Vector right) => And(left, right); + + /// + /// svint16_t svand[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// AND Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; AND Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svand[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// AND Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// AND Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// AND Zresult.D, Zop1.D, Zop2.D + /// svint16_t svand[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; AND Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; AND Zresult.H, Pg/M, Zresult.H, Zop1.H + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// AND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A AND ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_P1, REG_V2, INS_OPTS_SCALABLE_B); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_and, EA_SCALABLE, REG_P0, REG_P3, REG_P14, REG_P15, INS_OPTS_SCALABLE_B); /* AND .B, /Z, .B, .B */ + /// IF_SVE_AU_3A AND .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector And(Vector left, Vector right) => And(left, right); + + /// + /// svint32_t svand[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// AND Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; AND Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svand[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// AND Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// AND Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// AND Zresult.D, Zop1.D, Zop2.D + /// svint32_t svand[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; AND Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; AND Zresult.S, Pg/M, Zresult.S, Zop1.S + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// AND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A AND ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_P1, REG_V2, INS_OPTS_SCALABLE_B); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_and, EA_SCALABLE, REG_P0, REG_P3, REG_P14, REG_P15, INS_OPTS_SCALABLE_B); /* AND .B, /Z, .B, .B */ + /// IF_SVE_AU_3A AND .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector And(Vector left, Vector right) => And(left, right); + + /// + /// svint64_t svand[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// AND Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; AND Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svand[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// AND Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// AND Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// AND Zresult.D, Zop1.D, Zop2.D + /// svint64_t svand[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; AND Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; AND Zresult.D, Pg/M, Zresult.D, Zop1.D + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// AND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A AND ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_P1, REG_V2, INS_OPTS_SCALABLE_B); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_and, EA_SCALABLE, REG_P0, REG_P3, REG_P14, REG_P15, INS_OPTS_SCALABLE_B); /* AND .B, /Z, .B, .B */ + /// IF_SVE_AU_3A AND .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector And(Vector left, Vector right) => And(left, right); + + /// + /// svuint8_t svand[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// AND Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; AND Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svand[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// AND Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// AND Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// AND Zresult.D, Zop1.D, Zop2.D + /// svuint8_t svand[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; AND Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; AND Zresult.B, Pg/M, Zresult.B, Zop1.B + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// AND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A AND ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_P1, REG_V2, INS_OPTS_SCALABLE_B); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_and, EA_SCALABLE, REG_P0, REG_P3, REG_P14, REG_P15, INS_OPTS_SCALABLE_B); /* AND .B, /Z, .B, .B */ + /// IF_SVE_AU_3A AND .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector And(Vector left, Vector right) => And(left, right); + + /// + /// svuint16_t svand[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// AND Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; AND Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svand[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// AND Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// AND Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// AND Zresult.D, Zop1.D, Zop2.D + /// svuint16_t svand[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; AND Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; AND Zresult.H, Pg/M, Zresult.H, Zop1.H + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// AND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A AND ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_P1, REG_V2, INS_OPTS_SCALABLE_B); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
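+ /// Usage sketch (illustrative only): + /// if (Sve.IsSupported) { Vector<ushort> r = Sve.And(a, b); } // per-lane bitwise a & b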
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_and, EA_SCALABLE, REG_P0, REG_P3, REG_P14, REG_P15, INS_OPTS_SCALABLE_B); /* AND .B, /Z, .B, .B */ + /// IF_SVE_AU_3A AND .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector And(Vector left, Vector right) => And(left, right); + + /// + /// svuint32_t svand[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// AND Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; AND Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svand[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// AND Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// AND Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// AND Zresult.D, Zop1.D, Zop2.D + /// svuint32_t svand[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; AND Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; AND Zresult.S, Pg/M, Zresult.S, Zop1.S + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// AND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A AND ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_P1, REG_V2, INS_OPTS_SCALABLE_B); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_and, EA_SCALABLE, REG_P0, REG_P3, REG_P14, REG_P15, INS_OPTS_SCALABLE_B); /* AND .B, /Z, .B, .B */ + /// IF_SVE_AU_3A AND .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector And(Vector left, Vector right) => And(left, right); + + /// + /// svuint64_t svand[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// AND Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; AND Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svand[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// AND Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// AND Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// AND Zresult.D, Zop1.D, Zop2.D + /// svuint64_t svand[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; AND Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; AND Zresult.D, Pg/M, Zresult.D, Zop1.D + /// svbool_t svand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// AND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A AND ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_P1, REG_V2, INS_OPTS_SCALABLE_B); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_and, EA_SCALABLE, REG_P0, REG_P3, REG_P14, REG_P15, INS_OPTS_SCALABLE_B); /* AND .B, /Z, .B, .B */ + /// IF_SVE_AU_3A AND .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_and, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector And(Vector left, Vector right) => And(left, right); + + + /// AndAcross : Bitwise AND reduction to scalar + + /// + /// int8_t svandv[_s8](svbool_t pg, svint8_t op) + /// ANDV Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ANDV , , . 
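+ /// Usage sketch (illustrative only; 'v' is a hypothetical Vector<sbyte>): + /// if (Sve.IsSupported) { sbyte all = Sve.AndAcross(v)[0]; } // bitwise AND of every lane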
+ /// theEmitter->emitIns_R_R_R(INS_sve_andv, EA_1BYTE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndAcross(Vector value) => AndAcross(value); + + /// + /// int16_t svandv[_s16](svbool_t pg, svint16_t op) + /// ANDV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ANDV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_andv, EA_1BYTE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndAcross(Vector value) => AndAcross(value); + + /// + /// int32_t svandv[_s32](svbool_t pg, svint32_t op) + /// ANDV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ANDV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_andv, EA_1BYTE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndAcross(Vector value) => AndAcross(value); + + /// + /// int64_t svandv[_s64](svbool_t pg, svint64_t op) + /// ANDV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ANDV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_andv, EA_1BYTE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndAcross(Vector value) => AndAcross(value); + + /// + /// uint8_t svandv[_u8](svbool_t pg, svuint8_t op) + /// ANDV Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ANDV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_andv, EA_1BYTE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndAcross(Vector value) => AndAcross(value); + + /// + /// uint16_t svandv[_u16](svbool_t pg, svuint16_t op) + /// ANDV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ANDV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_andv, EA_1BYTE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndAcross(Vector value) => AndAcross(value); + + /// + /// uint32_t svandv[_u32](svbool_t pg, svuint32_t op) + /// ANDV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ANDV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_andv, EA_1BYTE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndAcross(Vector value) => AndAcross(value); + + /// + /// uint64_t svandv[_u64](svbool_t pg, svuint64_t op) + /// ANDV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ANDV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_andv, EA_1BYTE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndAcross(Vector value) => AndAcross(value); + + + /// AndNot : Bitwise NAND + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nand, EA_SCALABLE, REG_P1, REG_P7, REG_P2, REG_P15, INS_OPTS_SCALABLE_B); /* NAND .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndNot(Vector left, Vector right) => AndNot(left, right); + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_nand, EA_SCALABLE, REG_P1, REG_P7, REG_P2, REG_P15, INS_OPTS_SCALABLE_B); /* NAND .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndNot(Vector left, Vector right) => AndNot(left, right); + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nand, EA_SCALABLE, REG_P1, REG_P7, REG_P2, REG_P15, INS_OPTS_SCALABLE_B); /* NAND .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndNot(Vector left, Vector right) => AndNot(left, right); + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nand, EA_SCALABLE, REG_P1, REG_P7, REG_P2, REG_P15, INS_OPTS_SCALABLE_B); /* NAND .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndNot(Vector left, Vector right) => AndNot(left, right); + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nand, EA_SCALABLE, REG_P1, REG_P7, REG_P2, REG_P15, INS_OPTS_SCALABLE_B); /* NAND .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndNot(Vector left, Vector right) => AndNot(left, right); + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nand, EA_SCALABLE, REG_P1, REG_P7, REG_P2, REG_P15, INS_OPTS_SCALABLE_B); /* NAND .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndNot(Vector left, Vector right) => AndNot(left, right); + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nand, EA_SCALABLE, REG_P1, REG_P7, REG_P2, REG_P15, INS_OPTS_SCALABLE_B); /* NAND .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndNot(Vector left, Vector right) => AndNot(left, right); + + /// + /// svbool_t svnand[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NAND Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
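+ /// Usage sketch (illustrative only; element type elided): + /// if (Sve.IsSupported) { var r = Sve.AndNot(a, b); } // per-lane ~(a & b)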
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_nand, EA_SCALABLE, REG_P1, REG_P7, REG_P2, REG_P15, INS_OPTS_SCALABLE_B); /* NAND .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AndNot(Vector left, Vector right) => AndNot(left, right); + + + /// BitwiseClear : Bitwise clear + + /// + /// svint8_t svbic[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// BIC Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; BIC Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svbic[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// BIC Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// BIC Zresult.D, Zop1.D, Zop2.D + /// svint8_t svbic[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; BIC Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// BIC Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A BIC ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_P4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_bic, EA_SCALABLE, REG_P4, REG_P9, REG_P12, REG_P11, INS_OPTS_SCALABLE_B); /* BIC .B, /Z, .B, .B */ + /// IF_SVE_AU_3A BIC .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) => BitwiseClear(left, right); + + /// + /// svint16_t svbic[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// BIC Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; BIC Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svbic[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// BIC Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// BIC Zresult.D, Zop1.D, Zop2.D + /// svint16_t svbic[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; BIC Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// BIC Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A BIC ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_P4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_bic, EA_SCALABLE, REG_P4, REG_P9, REG_P12, REG_P11, INS_OPTS_SCALABLE_B); /* BIC .B, /Z, .B, .B */ + /// IF_SVE_AU_3A BIC .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) => BitwiseClear(left, right); + + /// + /// svint32_t svbic[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// BIC Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; BIC Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svbic[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// BIC Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// BIC Zresult.D, Zop1.D, Zop2.D + /// svint32_t svbic[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; BIC Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// BIC Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A BIC ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_P4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_bic, EA_SCALABLE, REG_P4, REG_P9, REG_P12, REG_P11, INS_OPTS_SCALABLE_B); /* BIC .B, /Z, .B, .B */ + /// IF_SVE_AU_3A BIC .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) => BitwiseClear(left, right); + + /// + /// svint64_t svbic[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// BIC Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; BIC Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svbic[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// BIC Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// BIC Zresult.D, Zop1.D, Zop2.D + /// svint64_t svbic[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; BIC Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// BIC Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A BIC ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_P4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_bic, EA_SCALABLE, REG_P4, REG_P9, REG_P12, REG_P11, INS_OPTS_SCALABLE_B); /* BIC .B, /Z, .B, .B */ + /// IF_SVE_AU_3A BIC .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) => BitwiseClear(left, right); + + /// + /// svuint8_t svbic[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// BIC Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; BIC Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svbic[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// BIC Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// BIC Zresult.D, Zop1.D, Zop2.D + /// svuint8_t svbic[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; BIC Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// BIC Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A BIC ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_P4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_bic, EA_SCALABLE, REG_P4, REG_P9, REG_P12, REG_P11, INS_OPTS_SCALABLE_B); /* BIC .B, /Z, .B, .B */ + /// IF_SVE_AU_3A BIC .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) => BitwiseClear(left, right); + + /// + /// svuint16_t svbic[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// BIC Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; BIC Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svbic[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// BIC Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// BIC Zresult.D, Zop1.D, Zop2.D + /// svuint16_t svbic[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; BIC Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// BIC Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A BIC ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_P4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_bic, EA_SCALABLE, REG_P4, REG_P9, REG_P12, REG_P11, INS_OPTS_SCALABLE_B); /* BIC .B, /Z, .B, .B */ + /// IF_SVE_AU_3A BIC .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) => BitwiseClear(left, right); + + /// + /// svuint32_t svbic[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// BIC Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; BIC Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svbic[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// BIC Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// BIC Zresult.D, Zop1.D, Zop2.D + /// svuint32_t svbic[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; BIC Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// BIC Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A BIC ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_P4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_bic, EA_SCALABLE, REG_P4, REG_P9, REG_P12, REG_P11, INS_OPTS_SCALABLE_B); /* BIC .B, /Z, .B, .B */ + /// IF_SVE_AU_3A BIC .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) => BitwiseClear(left, right); + + /// + /// svuint64_t svbic[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// BIC Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; BIC Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svbic[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// BIC Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// BIC Zresult.D, Zop1.D, Zop2.D + /// svuint64_t svbic[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; BIC Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svbool_t svbic[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// BIC Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A BIC ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_P4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_bic, EA_SCALABLE, REG_P4, REG_P9, REG_P12, REG_P11, INS_OPTS_SCALABLE_B); /* BIC .B, /Z, .B, .B */ + /// IF_SVE_AU_3A BIC .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bic, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BitwiseClear(Vector left, Vector right) => BitwiseClear(left, right); + + + /// BooleanNot : Logically invert boolean condition + + /// + /// svint8_t svcnot[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) + /// CNOT Ztied.B, Pg/M, Zop.B + /// MOVPRFX Zresult, Zinactive; CNOT Zresult.B, Pg/M, Zop.B + /// svint8_t svcnot[_s8]_x(svbool_t pg, svint8_t op) + /// CNOT Ztied.B, Pg/M, Ztied.B + /// MOVPRFX Zresult, Zop; CNOT Zresult.B, Pg/M, Zop.B + /// svint8_t svcnot[_s8]_z(svbool_t pg, svint8_t op) + /// MOVPRFX Zresult.B, Pg/Z, Zop.B; CNOT Zresult.B, Pg/M, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNOT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnot, EA_SCALABLE, REG_V29, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BooleanNot(Vector value) => BooleanNot(value); + + /// + /// svint16_t svcnot[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// CNOT Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; CNOT Zresult.H, Pg/M, Zop.H + /// svint16_t svcnot[_s16]_x(svbool_t pg, svint16_t op) + /// CNOT Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; CNOT Zresult.H, Pg/M, Zop.H + /// svint16_t svcnot[_s16]_z(svbool_t pg, svint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; CNOT Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNOT ., /M, . 
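+
+ // --- Editor's sketch (illustrative only; identifiers below are assumed, not generated) ---
+ // The BitwiseClear overloads above map to BIC: each active element becomes left & ~right.
+ // A minimal use, assuming Sve.IsSupported:
+ public static Vector<int> ClearFlagBits(Vector<int> values, Vector<int> flags)
+     => Sve.BitwiseClear(values, flags); // values & ~flags per element
+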
+ /// theEmitter->emitIns_R_R_R(INS_sve_cnot, EA_SCALABLE, REG_V29, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BooleanNot(Vector value) => BooleanNot(value); + + /// + /// svint32_t svcnot[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// CNOT Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; CNOT Zresult.S, Pg/M, Zop.S + /// svint32_t svcnot[_s32]_x(svbool_t pg, svint32_t op) + /// CNOT Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; CNOT Zresult.S, Pg/M, Zop.S + /// svint32_t svcnot[_s32]_z(svbool_t pg, svint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; CNOT Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNOT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnot, EA_SCALABLE, REG_V29, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BooleanNot(Vector value) => BooleanNot(value); + + /// + /// svint64_t svcnot[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// CNOT Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; CNOT Zresult.D, Pg/M, Zop.D + /// svint64_t svcnot[_s64]_x(svbool_t pg, svint64_t op) + /// CNOT Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; CNOT Zresult.D, Pg/M, Zop.D + /// svint64_t svcnot[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; CNOT Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNOT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnot, EA_SCALABLE, REG_V29, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BooleanNot(Vector value) => BooleanNot(value); + + /// + /// svuint8_t svcnot[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op) + /// CNOT Ztied.B, Pg/M, Zop.B + /// MOVPRFX Zresult, Zinactive; CNOT Zresult.B, Pg/M, Zop.B + /// svuint8_t svcnot[_u8]_x(svbool_t pg, svuint8_t op) + /// CNOT Ztied.B, Pg/M, Ztied.B + /// MOVPRFX Zresult, Zop; CNOT Zresult.B, Pg/M, Zop.B + /// svuint8_t svcnot[_u8]_z(svbool_t pg, svuint8_t op) + /// MOVPRFX Zresult.B, Pg/Z, Zop.B; CNOT Zresult.B, Pg/M, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNOT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnot, EA_SCALABLE, REG_V29, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BooleanNot(Vector value) => BooleanNot(value); + + /// + /// svuint16_t svcnot[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) + /// CNOT Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; CNOT Zresult.H, Pg/M, Zop.H + /// svuint16_t svcnot[_u16]_x(svbool_t pg, svuint16_t op) + /// CNOT Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; CNOT Zresult.H, Pg/M, Zop.H + /// svuint16_t svcnot[_u16]_z(svbool_t pg, svuint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; CNOT Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNOT ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_cnot, EA_SCALABLE, REG_V29, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BooleanNot(Vector value) => BooleanNot(value); + + /// + /// svuint32_t svcnot[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// CNOT Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; CNOT Zresult.S, Pg/M, Zop.S + /// svuint32_t svcnot[_u32]_x(svbool_t pg, svuint32_t op) + /// CNOT Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; CNOT Zresult.S, Pg/M, Zop.S + /// svuint32_t svcnot[_u32]_z(svbool_t pg, svuint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; CNOT Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNOT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnot, EA_SCALABLE, REG_V29, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BooleanNot(Vector value) => BooleanNot(value); + + /// + /// svuint64_t svcnot[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// CNOT Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; CNOT Zresult.D, Pg/M, Zop.D + /// svuint64_t svcnot[_u64]_x(svbool_t pg, svuint64_t op) + /// CNOT Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; CNOT Zresult.D, Pg/M, Zop.D + /// svuint64_t svcnot[_u64]_z(svbool_t pg, svuint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; CNOT Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNOT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnot, EA_SCALABLE, REG_V29, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector BooleanNot(Vector value) => BooleanNot(value); + + + + /// Compact : Shuffle active elements of vector to the right and fill with zero + + /// + /// svint32_t svcompact[_s32](svbool_t pg, svint32_t op) + /// COMPACT Zresult.S, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CL_3A COMPACT ., , . + /// theEmitter->emitIns_R_R_R(INS_sve_compact, EA_SCALABLE, REG_V16, REG_P7, REG_V13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_compact, EA_SCALABLE, REG_V15, REG_P0, REG_V12, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector Compact(Vector mask, Vector value) => Compact(mask, value); + + /// + /// svint64_t svcompact[_s64](svbool_t pg, svint64_t op) + /// COMPACT Zresult.D, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CL_3A COMPACT ., , . + /// theEmitter->emitIns_R_R_R(INS_sve_compact, EA_SCALABLE, REG_V16, REG_P7, REG_V13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_compact, EA_SCALABLE, REG_V15, REG_P0, REG_V12, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector Compact(Vector mask, Vector value) => Compact(mask, value); + + /// + /// svuint32_t svcompact[_u32](svbool_t pg, svuint32_t op) + /// COMPACT Zresult.S, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CL_3A COMPACT ., , . + /// theEmitter->emitIns_R_R_R(INS_sve_compact, EA_SCALABLE, REG_V16, REG_P7, REG_V13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_compact, EA_SCALABLE, REG_V15, REG_P0, REG_V12, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector Compact(Vector mask, Vector value) => Compact(mask, value); + + /// + /// svuint64_t svcompact[_u64](svbool_t pg, svuint64_t op) + /// COMPACT Zresult.D, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CL_3A COMPACT ., , . 
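+
+ // --- Editor's sketch (illustrative only; identifiers below are assumed, not generated) ---
+ // The BooleanNot overloads above map to CNOT: an active element becomes 1 when the input
+ // element is zero and 0 otherwise, i.e. a per-element logical (not bitwise) negation:
+ public static Vector<int> IsZeroMask(Vector<int> values)
+     => Sve.BooleanNot(values); // 1 where values[i] == 0, else 0
+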
+ /// theEmitter->emitIns_R_R_R(INS_sve_compact, EA_SCALABLE, REG_V16, REG_P7, REG_V13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_compact, EA_SCALABLE, REG_V15, REG_P0, REG_V12, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector Compact(Vector mask, Vector value) => Compact(mask, value); + + /// + /// svfloat32_t svcompact[_f32](svbool_t pg, svfloat32_t op) + /// COMPACT Zresult.S, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CL_3A COMPACT ., , . + /// theEmitter->emitIns_R_R_R(INS_sve_compact, EA_SCALABLE, REG_V16, REG_P7, REG_V13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_compact, EA_SCALABLE, REG_V15, REG_P0, REG_V12, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector Compact(Vector mask, Vector value) => Compact(mask, value); + + /// + /// svfloat64_t svcompact[_f64](svbool_t pg, svfloat64_t op) + /// COMPACT Zresult.D, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CL_3A COMPACT ., , . + /// theEmitter->emitIns_R_R_R(INS_sve_compact, EA_SCALABLE, REG_V16, REG_P7, REG_V13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_compact, EA_SCALABLE, REG_V15, REG_P0, REG_V12, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector Compact(Vector mask, Vector value) => Compact(mask, value); + + + /// CompareEqual : Compare equal to + + /// + /// svbool_t svcmpeq[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// CMPEQ Presult.B, Pg/Z, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPEQ ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V0, REG_V10, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P7, REG_V31, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPEQ ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V31, 8, INS_OPTS_SCALABLE_B); /* CMPEQ ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + /// + /// svbool_t svcmpeq_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) + /// CMPEQ Presult.B, Pg/Z, Zop1.B, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPEQ ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V0, REG_V10, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P7, REG_V31, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPEQ ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V31, 8, INS_OPTS_SCALABLE_B); /* CMPEQ ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + /// + /// svbool_t svcmpeq[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// CMPEQ Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPEQ ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V0, REG_V10, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P7, REG_V31, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPEQ ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . 
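+
+ // --- Editor's sketch (illustrative only; identifiers below are assumed, not generated) ---
+ // Compact above packs the elements of `value` selected by `mask` into contiguous lanes and
+ // zero-fills the remainder. For example, keeping only the positive elements of a vector by
+ // feeding it a comparison result as the mask:
+ public static Vector<float> KeepPositives(Vector<float> values)
+     => Sve.Compact(Sve.CompareGreaterThan(values, Vector<float>.Zero), values);
+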
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V31, 8, INS_OPTS_SCALABLE_B); /* CMPEQ ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + /// + /// svbool_t svcmpeq_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) + /// CMPEQ Presult.H, Pg/Z, Zop1.H, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPEQ ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V0, REG_V10, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P7, REG_V31, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPEQ ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V31, 8, INS_OPTS_SCALABLE_B); /* CMPEQ ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + /// + /// svbool_t svcmpeq[_s32](svbool_t pg, svint32_t op1, svint32_t op2) + /// CMPEQ Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPEQ ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V0, REG_V10, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P7, REG_V31, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPEQ ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V31, 8, INS_OPTS_SCALABLE_B); /* CMPEQ ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + /// + /// svbool_t svcmpeq_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) + /// CMPEQ Presult.S, Pg/Z, Zop1.S, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPEQ ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V0, REG_V10, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P7, REG_V31, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPEQ ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V31, 8, INS_OPTS_SCALABLE_B); /* CMPEQ ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + /// + /// svbool_t svcmpeq[_s64](svbool_t pg, svint64_t op1, svint64_t op2) + /// CMPEQ Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPEQ ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V0, REG_V10, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P7, REG_V31, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPEQ ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V31, 8, INS_OPTS_SCALABLE_B); /* CMPEQ ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + /// + /// svbool_t svcmpeq[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// CMPEQ Presult.B, Pg/Z, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPEQ ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V0, REG_V10, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P7, REG_V31, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPEQ ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V31, 8, INS_OPTS_SCALABLE_B); /* CMPEQ ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + /// + /// svbool_t svcmpeq[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// CMPEQ Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPEQ ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V0, REG_V10, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P7, REG_V31, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPEQ ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V31, 8, INS_OPTS_SCALABLE_B); /* CMPEQ ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + /// + /// svbool_t svcmpeq[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) + /// CMPEQ Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPEQ ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V0, REG_V10, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P7, REG_V31, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPEQ ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V31, 8, INS_OPTS_SCALABLE_B); /* CMPEQ ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + /// + /// svbool_t svcmpeq[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) + /// CMPEQ Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPEQ ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V0, REG_V10, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P7, REG_V31, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPEQ ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpeq, EA_SCALABLE, REG_P15, REG_P0, REG_V31, 8, INS_OPTS_SCALABLE_B); /* CMPEQ ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + /// + /// svbool_t svcmpeq[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FCMEQ Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMEQ ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmeq, EA_SCALABLE, REG_P2, REG_P4, REG_V28, REG_V8, INS_OPTS_SCALABLE_S); + /// IF_SVE_HI_3A FCMEQ ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmeq, EA_SCALABLE, REG_P2, REG_P3, REG_V4, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + /// + /// svbool_t svcmpeq[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FCMEQ Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMEQ ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmeq, EA_SCALABLE, REG_P2, REG_P4, REG_V28, REG_V8, INS_OPTS_SCALABLE_S); + /// IF_SVE_HI_3A FCMEQ ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmeq, EA_SCALABLE, REG_P2, REG_P3, REG_V4, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + + /// CompareGreaterThan : Compare greater than + + /// + /// svbool_t svcmpgt[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// CMPGT Presult.B, Pg/Z, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P2, REG_V20, REG_V24, INS_OPTS_SCALABLE_S); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P5, REG_V11, REG_V23, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPGT ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpgt, EA_SCALABLE, REG_P10, REG_P1, REG_V18, 4, INS_OPTS_SCALABLE_S); /* CMPGT ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) + /// CMPGT Presult.B, Pg/Z, Zop1.B, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P2, REG_V20, REG_V24, INS_OPTS_SCALABLE_S); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P5, REG_V11, REG_V23, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPGT ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpgt, EA_SCALABLE, REG_P10, REG_P1, REG_V18, 4, INS_OPTS_SCALABLE_S); /* CMPGT ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// CMPGT Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGT ., /Z, ., . 
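+
+ // --- Editor's sketch (illustrative only; identifiers below are assumed, not generated) ---
+ // The CompareEqual overloads above surface their predicate result as a vector mask: each
+ // active lane is nonzero where the comparison holds and zero elsewhere (assumed convention
+ // here). The result can feed mask-consuming helpers such as Compact above:
+ public static Vector<int> EqualMask(Vector<int> left, Vector<int> right)
+     => Sve.CompareEqual(left, right);
+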
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P2, REG_V20, REG_V24, INS_OPTS_SCALABLE_S); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P5, REG_V11, REG_V23, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPGT ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpgt, EA_SCALABLE, REG_P10, REG_P1, REG_V18, 4, INS_OPTS_SCALABLE_S); /* CMPGT ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) + /// CMPGT Presult.H, Pg/Z, Zop1.H, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P2, REG_V20, REG_V24, INS_OPTS_SCALABLE_S); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P5, REG_V11, REG_V23, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPGT ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpgt, EA_SCALABLE, REG_P10, REG_P1, REG_V18, 4, INS_OPTS_SCALABLE_S); /* CMPGT ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt[_s32](svbool_t pg, svint32_t op1, svint32_t op2) + /// CMPGT Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P2, REG_V20, REG_V24, INS_OPTS_SCALABLE_S); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P5, REG_V11, REG_V23, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPGT ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpgt, EA_SCALABLE, REG_P10, REG_P1, REG_V18, 4, INS_OPTS_SCALABLE_S); /* CMPGT ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) + /// CMPGT Presult.S, Pg/Z, Zop1.S, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P2, REG_V20, REG_V24, INS_OPTS_SCALABLE_S); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P5, REG_V11, REG_V23, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPGT ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpgt, EA_SCALABLE, REG_P10, REG_P1, REG_V18, 4, INS_OPTS_SCALABLE_S); /* CMPGT ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt[_s64](svbool_t pg, svint64_t op1, svint64_t op2) + /// CMPGT Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGT ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P2, REG_V20, REG_V24, INS_OPTS_SCALABLE_S); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P5, REG_V11, REG_V23, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPGT ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpgt, EA_SCALABLE, REG_P10, REG_P1, REG_V18, 4, INS_OPTS_SCALABLE_S); /* CMPGT ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// CMPHI Presult.B, Pg/Z, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHI ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P3, REG_V30, REG_V25, INS_OPTS_SCALABLE_D); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P4, REG_V1, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPHI ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphi, EA_SCALABLE, REG_P15, REG_P7, REG_V19, 0, INS_OPTS_SCALABLE_B); /* CMPHI ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2) + /// CMPHI Presult.B, Pg/Z, Zop1.B, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHI ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P3, REG_V30, REG_V25, INS_OPTS_SCALABLE_D); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P4, REG_V1, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPHI ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphi, EA_SCALABLE, REG_P15, REG_P7, REG_V19, 0, INS_OPTS_SCALABLE_B); /* CMPHI ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// CMPHI Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHI ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P3, REG_V30, REG_V25, INS_OPTS_SCALABLE_D); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P4, REG_V1, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPHI ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphi, EA_SCALABLE, REG_P15, REG_P7, REG_V19, 0, INS_OPTS_SCALABLE_B); /* CMPHI ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2) + /// CMPHI Presult.H, Pg/Z, Zop1.H, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHI ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P3, REG_V30, REG_V25, INS_OPTS_SCALABLE_D); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P4, REG_V1, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPHI ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphi, EA_SCALABLE, REG_P15, REG_P7, REG_V19, 0, INS_OPTS_SCALABLE_B); /* CMPHI ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) + /// CMPHI Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHI ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P3, REG_V30, REG_V25, INS_OPTS_SCALABLE_D); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P4, REG_V1, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPHI ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphi, EA_SCALABLE, REG_P15, REG_P7, REG_V19, 0, INS_OPTS_SCALABLE_B); /* CMPHI ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2) + /// CMPHI Presult.S, Pg/Z, Zop1.S, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHI ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P3, REG_V30, REG_V25, INS_OPTS_SCALABLE_D); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P4, REG_V1, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPHI ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphi, EA_SCALABLE, REG_P15, REG_P7, REG_V19, 0, INS_OPTS_SCALABLE_B); /* CMPHI ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) + /// CMPHI Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHI ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P3, REG_V30, REG_V25, INS_OPTS_SCALABLE_D); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P4, REG_V1, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPHI ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphi, EA_SCALABLE, REG_P15, REG_P7, REG_V19, 0, INS_OPTS_SCALABLE_B); /* CMPHI ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FCMGT Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMGT ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmgt, EA_SCALABLE, REG_P3, REG_P6, REG_V18, REG_V28, INS_OPTS_SCALABLE_H); + /// IF_SVE_HI_3A FCMGT ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmgt, EA_SCALABLE, REG_P11, REG_P5, REG_V2, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + /// + /// svbool_t svcmpgt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FCMGT Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmgt, EA_SCALABLE, REG_P3, REG_P6, REG_V18, REG_V28, INS_OPTS_SCALABLE_H); + /// IF_SVE_HI_3A FCMGT ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmgt, EA_SCALABLE, REG_P11, REG_P5, REG_V2, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + + /// CompareGreaterThanOrEqual : Compare greater than or equal to + + /// + /// svbool_t svcmpge[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// CMPGE Presult.B, Pg/Z, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P1, REG_V10, REG_V23, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P6, REG_V21, REG_V13, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPGE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpge, EA_SCALABLE, REG_P11, REG_P7, REG_V21, 1, INS_OPTS_SCALABLE_H); /* CMPGE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) + /// CMPGE Presult.B, Pg/Z, Zop1.B, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P1, REG_V10, REG_V23, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P6, REG_V21, REG_V13, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPGE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpge, EA_SCALABLE, REG_P11, REG_P7, REG_V21, 1, INS_OPTS_SCALABLE_H); /* CMPGE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// CMPGE Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P1, REG_V10, REG_V23, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P6, REG_V21, REG_V13, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPGE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . 
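+
+ // --- Editor's sketch (illustrative only; identifiers below are assumed, not generated) ---
+ // Besides the same-width comparisons, the svcmpgt_wide forms above compare narrow elements
+ // against 64-bit elements (CMPGT ... .D), surfaced as mixed-width overloads:
+ public static Vector<sbyte> GreaterThanWide(Vector<sbyte> left, Vector<long> right)
+     => Sve.CompareGreaterThan(left, right); // each sbyte lane vs the overlapping 64-bit lane
+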
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpge, EA_SCALABLE, REG_P11, REG_P7, REG_V21, 1, INS_OPTS_SCALABLE_H); /* CMPGE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) + /// CMPGE Presult.H, Pg/Z, Zop1.H, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P1, REG_V10, REG_V23, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P6, REG_V21, REG_V13, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPGE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpge, EA_SCALABLE, REG_P11, REG_P7, REG_V21, 1, INS_OPTS_SCALABLE_H); /* CMPGE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge[_s32](svbool_t pg, svint32_t op1, svint32_t op2) + /// CMPGE Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P1, REG_V10, REG_V23, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P6, REG_V21, REG_V13, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPGE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpge, EA_SCALABLE, REG_P11, REG_P7, REG_V21, 1, INS_OPTS_SCALABLE_H); /* CMPGE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) + /// CMPGE Presult.S, Pg/Z, Zop1.S, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P1, REG_V10, REG_V23, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P6, REG_V21, REG_V13, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPGE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpge, EA_SCALABLE, REG_P11, REG_P7, REG_V21, 1, INS_OPTS_SCALABLE_H); /* CMPGE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge[_s64](svbool_t pg, svint64_t op1, svint64_t op2) + /// CMPGE Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P1, REG_V10, REG_V23, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P6, REG_V21, REG_V13, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPGE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpge, EA_SCALABLE, REG_P11, REG_P7, REG_V21, 1, INS_OPTS_SCALABLE_H); /* CMPGE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// CMPHS Presult.B, Pg/Z, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHS ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P4, REG_V1, REG_V26, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P3, REG_V0, REG_V30, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPHS ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P1, REG_V0, 36, INS_OPTS_SCALABLE_H); /* CMPHS ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2) + /// CMPHS Presult.B, Pg/Z, Zop1.B, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHS ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P4, REG_V1, REG_V26, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P3, REG_V0, REG_V30, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPHS ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P1, REG_V0, 36, INS_OPTS_SCALABLE_H); /* CMPHS ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// CMPHS Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHS ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P4, REG_V1, REG_V26, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P3, REG_V0, REG_V30, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPHS ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P1, REG_V0, 36, INS_OPTS_SCALABLE_H); /* CMPHS ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2) + /// CMPHS Presult.H, Pg/Z, Zop1.H, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHS ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P4, REG_V1, REG_V26, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P3, REG_V0, REG_V30, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPHS ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P1, REG_V0, 36, INS_OPTS_SCALABLE_H); /* CMPHS ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) + /// CMPHS Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHS ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P4, REG_V1, REG_V26, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P3, REG_V0, REG_V30, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPHS ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P1, REG_V0, 36, INS_OPTS_SCALABLE_H); /* CMPHS ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2) + /// CMPHS Presult.S, Pg/Z, Zop1.S, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHS ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P4, REG_V1, REG_V26, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P3, REG_V0, REG_V30, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPHS ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P1, REG_V0, 36, INS_OPTS_SCALABLE_H); /* CMPHS ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) + /// CMPHS Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHS ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P4, REG_V1, REG_V26, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P3, REG_V0, REG_V30, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPHS ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P1, REG_V0, 36, INS_OPTS_SCALABLE_H); /* CMPHS ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FCMGE Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMGE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmge, EA_SCALABLE, REG_P13, REG_P5, REG_V8, REG_V18, INS_OPTS_SCALABLE_D); + /// IF_SVE_HI_3A FCMGE ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmge, EA_SCALABLE, REG_P1, REG_P2, REG_V3, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + /// + /// svbool_t svcmpge[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FCMGE Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmge, EA_SCALABLE, REG_P13, REG_P5, REG_V8, REG_V18, INS_OPTS_SCALABLE_D); + /// IF_SVE_HI_3A FCMGE ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmge, EA_SCALABLE, REG_P1, REG_P2, REG_V3, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + + /// CompareLessThan : Compare less than + + /// + /// svbool_t svcmplt[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// CMPGT Presult.B, Pg/Z, Zop2.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P2, REG_V20, REG_V24, INS_OPTS_SCALABLE_S); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P5, REG_V11, REG_V23, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPGT ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpgt, EA_SCALABLE, REG_P10, REG_P1, REG_V18, 4, INS_OPTS_SCALABLE_S); /* CMPGT ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) + /// CMPLT Presult.B, Pg/Z, Zop1.B, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPLT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmplt, EA_SCALABLE, REG_P2, REG_P0, REG_V14, REG_V30, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmplt, EA_SCALABLE, REG_P1, REG_P7, REG_V24, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPLT ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmplt, EA_SCALABLE, REG_P7, REG_P2, REG_V8, -16, INS_OPTS_SCALABLE_B); /* CMPLT ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// CMPGT Presult.H, Pg/Z, Zop2.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P2, REG_V20, REG_V24, INS_OPTS_SCALABLE_S); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P5, REG_V11, REG_V23, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPGT ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . 
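+
+ // --- Editor's sketch (illustrative only; identifiers below are assumed, not generated) ---
+ // Note the instruction selection in the CompareGreaterThanOrEqual overloads above: signed
+ // element types lower to CMPGE while the unsigned ones lower to CMPHS (unsigned
+ // higher-or-same):
+ public static Vector<uint> AtLeast(Vector<uint> values, Vector<uint> bounds)
+     => Sve.CompareGreaterThanOrEqual(values, bounds); // CMPHS for unsigned lanes
+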
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpgt, EA_SCALABLE, REG_P10, REG_P1, REG_V18, 4, INS_OPTS_SCALABLE_S); /* CMPGT ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) + /// CMPLT Presult.H, Pg/Z, Zop1.H, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPLT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmplt, EA_SCALABLE, REG_P2, REG_P0, REG_V14, REG_V30, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmplt, EA_SCALABLE, REG_P1, REG_P7, REG_V24, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPLT ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmplt, EA_SCALABLE, REG_P7, REG_P2, REG_V8, -16, INS_OPTS_SCALABLE_B); /* CMPLT ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt[_s32](svbool_t pg, svint32_t op1, svint32_t op2) + /// CMPGT Presult.S, Pg/Z, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P2, REG_V20, REG_V24, INS_OPTS_SCALABLE_S); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P5, REG_V11, REG_V23, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPGT ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpgt, EA_SCALABLE, REG_P10, REG_P1, REG_V18, 4, INS_OPTS_SCALABLE_S); /* CMPGT ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) + /// CMPLT Presult.S, Pg/Z, Zop1.S, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPLT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmplt, EA_SCALABLE, REG_P2, REG_P0, REG_V14, REG_V30, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmplt, EA_SCALABLE, REG_P1, REG_P7, REG_V24, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPLT ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmplt, EA_SCALABLE, REG_P7, REG_P2, REG_V8, -16, INS_OPTS_SCALABLE_B); /* CMPLT ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt[_s64](svbool_t pg, svint64_t op1, svint64_t op2) + /// CMPGT Presult.D, Pg/Z, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P2, REG_V20, REG_V24, INS_OPTS_SCALABLE_S); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpgt, EA_SCALABLE, REG_P13, REG_P5, REG_V11, REG_V23, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPGT ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpgt, EA_SCALABLE, REG_P10, REG_P1, REG_V18, 4, INS_OPTS_SCALABLE_S); /* CMPGT ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// CMPHI Presult.B, Pg/Z, Zop2.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHI ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P3, REG_V30, REG_V25, INS_OPTS_SCALABLE_D); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P4, REG_V1, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPHI ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphi, EA_SCALABLE, REG_P15, REG_P7, REG_V19, 0, INS_OPTS_SCALABLE_B); /* CMPHI ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2) + /// CMPLO Presult.B, Pg/Z, Zop1.B, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPLO ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmplo, EA_SCALABLE, REG_P11, REG_P6, REG_V12, REG_V28, INS_OPTS_SCALABLE_S); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmplo, EA_SCALABLE, REG_P3, REG_P1, REG_V20, REG_V1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPLO ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmplo, EA_SCALABLE, REG_P8, REG_P5, REG_V21, 64, INS_OPTS_SCALABLE_S); /* CMPLO ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// CMPHI Presult.H, Pg/Z, Zop2.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHI ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P3, REG_V30, REG_V25, INS_OPTS_SCALABLE_D); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P4, REG_V1, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPHI ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphi, EA_SCALABLE, REG_P15, REG_P7, REG_V19, 0, INS_OPTS_SCALABLE_B); /* CMPHI ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2) + /// CMPLO Presult.H, Pg/Z, Zop1.H, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPLO ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmplo, EA_SCALABLE, REG_P11, REG_P6, REG_V12, REG_V28, INS_OPTS_SCALABLE_S); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmplo, EA_SCALABLE, REG_P3, REG_P1, REG_V20, REG_V1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPLO ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmplo, EA_SCALABLE, REG_P8, REG_P5, REG_V21, 64, INS_OPTS_SCALABLE_S); /* CMPLO ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) + /// CMPHI Presult.S, Pg/Z, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHI ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P3, REG_V30, REG_V25, INS_OPTS_SCALABLE_D); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P4, REG_V1, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPHI ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphi, EA_SCALABLE, REG_P15, REG_P7, REG_V19, 0, INS_OPTS_SCALABLE_B); /* CMPHI ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2) + /// CMPLO Presult.S, Pg/Z, Zop1.S, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPLO ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmplo, EA_SCALABLE, REG_P11, REG_P6, REG_V12, REG_V28, INS_OPTS_SCALABLE_S); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmplo, EA_SCALABLE, REG_P3, REG_P1, REG_V20, REG_V1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPLO ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmplo, EA_SCALABLE, REG_P8, REG_P5, REG_V21, 64, INS_OPTS_SCALABLE_S); /* CMPLO ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) + /// CMPHI Presult.D, Pg/Z, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHI ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P3, REG_V30, REG_V25, INS_OPTS_SCALABLE_D); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphi, EA_SCALABLE, REG_P12, REG_P4, REG_V1, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPHI ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphi, EA_SCALABLE, REG_P15, REG_P7, REG_V19, 0, INS_OPTS_SCALABLE_B); /* CMPHI ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FCMGT Presult.S, Pg/Z, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMGT ., /Z, ., . 
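+ ///
+ /// Sketch (assumed Vector&lt;T&gt; generics): floating-point less-than reuses the reversed FCMGT,
+ /// an ordered compare, so a NaN in either lane produces a false result lane:
+ ///
+ ///     Vector<float> mask = Sve.CompareLessThan(x, y);   // FCMGT Pd.S, Pg/Z, Zy.S, Zx.S
+ ///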
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmgt, EA_SCALABLE, REG_P3, REG_P6, REG_V18, REG_V28, INS_OPTS_SCALABLE_H); + /// IF_SVE_HI_3A FCMGT ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmgt, EA_SCALABLE, REG_P11, REG_P5, REG_V2, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + /// + /// svbool_t svcmplt[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FCMGT Presult.D, Pg/Z, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmgt, EA_SCALABLE, REG_P3, REG_P6, REG_V18, REG_V28, INS_OPTS_SCALABLE_H); + /// IF_SVE_HI_3A FCMGT ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmgt, EA_SCALABLE, REG_P11, REG_P5, REG_V2, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + + /// CompareLessThanOrEqual : Compare less than or equal to + + /// + /// svbool_t svcmple[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// CMPGE Presult.B, Pg/Z, Zop2.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P1, REG_V10, REG_V23, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P6, REG_V21, REG_V13, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPGE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpge, EA_SCALABLE, REG_P11, REG_P7, REG_V21, 1, INS_OPTS_SCALABLE_H); /* CMPGE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) + /// CMPLE Presult.B, Pg/Z, Zop1.B, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPLE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmple, EA_SCALABLE, REG_P10, REG_P5, REG_V11, REG_V27, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmple, EA_SCALABLE, REG_P4, REG_P2, REG_V10, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPLE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmple, EA_SCALABLE, REG_P8, REG_P6, REG_V11, 15, INS_OPTS_SCALABLE_D); /* CMPLE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// CMPGE Presult.H, Pg/Z, Zop2.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P1, REG_V10, REG_V23, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P6, REG_V21, REG_V13, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPGE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . 
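+ ///
+ /// Sketch (assumed Vector&lt;T&gt; generics): less-than-or-equal follows the same pattern, the
+ /// reversed CMPGE for same-width operands and a direct CMPLE for the _wide overloads:
+ ///
+ ///     Vector<sbyte> le  = Sve.CompareLessThanOrEqual(a, b);     // CMPGE Pd.B, Pg/Z, Zb.B, Za.B
+ ///     Vector<sbyte> leW = Sve.CompareLessThanOrEqual(a, b64);   // CMPLE Pd.B, Pg/Z, Za.B, Zb64.D, with b64 : Vector<long>
+ ///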
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpge, EA_SCALABLE, REG_P11, REG_P7, REG_V21, 1, INS_OPTS_SCALABLE_H); /* CMPGE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) + /// CMPLE Presult.H, Pg/Z, Zop1.H, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPLE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmple, EA_SCALABLE, REG_P10, REG_P5, REG_V11, REG_V27, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmple, EA_SCALABLE, REG_P4, REG_P2, REG_V10, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPLE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmple, EA_SCALABLE, REG_P8, REG_P6, REG_V11, 15, INS_OPTS_SCALABLE_D); /* CMPLE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple[_s32](svbool_t pg, svint32_t op1, svint32_t op2) + /// CMPGE Presult.S, Pg/Z, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P1, REG_V10, REG_V23, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P6, REG_V21, REG_V13, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPGE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpge, EA_SCALABLE, REG_P11, REG_P7, REG_V21, 1, INS_OPTS_SCALABLE_H); /* CMPGE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) + /// CMPLE Presult.S, Pg/Z, Zop1.S, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPLE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmple, EA_SCALABLE, REG_P10, REG_P5, REG_V11, REG_V27, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmple, EA_SCALABLE, REG_P4, REG_P2, REG_V10, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); /* CMPLE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmple, EA_SCALABLE, REG_P8, REG_P6, REG_V11, 15, INS_OPTS_SCALABLE_D); /* CMPLE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple[_s64](svbool_t pg, svint64_t op1, svint64_t op2) + /// CMPGE Presult.D, Pg/Z, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P1, REG_V10, REG_V23, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpge, EA_SCALABLE, REG_P14, REG_P6, REG_V21, REG_V13, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPGE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpge, EA_SCALABLE, REG_P11, REG_P7, REG_V21, 1, INS_OPTS_SCALABLE_H); /* CMPGE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// CMPHS Presult.B, Pg/Z, Zop2.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHS ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P4, REG_V1, REG_V26, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P3, REG_V0, REG_V30, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPHS ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P1, REG_V0, 36, INS_OPTS_SCALABLE_H); /* CMPHS ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple_wide[_u8](svbool_t pg, svuint8_t op1, svuint64_t op2) + /// CMPLS Presult.B, Pg/Z, Zop1.B, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPLS ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpls, EA_SCALABLE, REG_P0, REG_P7, REG_V13, REG_V29, INS_OPTS_SCALABLE_D); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpls, EA_SCALABLE, REG_P2, REG_P0, REG_V30, REG_V2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPLS ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpls, EA_SCALABLE, REG_P0, REG_P3, REG_V9, 127, INS_OPTS_SCALABLE_D); /* CMPLS ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// CMPHS Presult.H, Pg/Z, Zop2.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHS ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P4, REG_V1, REG_V26, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P3, REG_V0, REG_V30, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPHS ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P1, REG_V0, 36, INS_OPTS_SCALABLE_H); /* CMPHS ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple_wide[_u16](svbool_t pg, svuint16_t op1, svuint64_t op2) + /// CMPLS Presult.H, Pg/Z, Zop1.H, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPLS ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpls, EA_SCALABLE, REG_P0, REG_P7, REG_V13, REG_V29, INS_OPTS_SCALABLE_D); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpls, EA_SCALABLE, REG_P2, REG_P0, REG_V30, REG_V2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPLS ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . 
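+ ///
+ /// Sketch (assumed Vector&lt;T&gt; generics): the unsigned overloads swap in the unsigned
+ /// condition names, reversed CMPHS (higher-or-same) and wide CMPLS (lower-or-same):
+ ///
+ ///     Vector<byte> le = Sve.CompareLessThanOrEqual(a, b);   // CMPHS Pd.B, Pg/Z, Zb.B, Za.B
+ ///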
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpls, EA_SCALABLE, REG_P0, REG_P3, REG_V9, 127, INS_OPTS_SCALABLE_D); /* CMPLS ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) + /// CMPHS Presult.S, Pg/Z, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHS ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P4, REG_V1, REG_V26, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P3, REG_V0, REG_V30, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPHS ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P1, REG_V0, 36, INS_OPTS_SCALABLE_H); /* CMPHS ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple_wide[_u32](svbool_t pg, svuint32_t op1, svuint64_t op2) + /// CMPLS Presult.S, Pg/Z, Zop1.S, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPLS ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpls, EA_SCALABLE, REG_P0, REG_P7, REG_V13, REG_V29, INS_OPTS_SCALABLE_D); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpls, EA_SCALABLE, REG_P2, REG_P0, REG_V30, REG_V2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPLS ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpls, EA_SCALABLE, REG_P0, REG_P3, REG_V9, 127, INS_OPTS_SCALABLE_D); /* CMPLS ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) + /// CMPHS Presult.D, Pg/Z, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPHS ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P4, REG_V1, REG_V26, INS_OPTS_SCALABLE_B); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P3, REG_V0, REG_V30, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); /* CMPHS ., /Z, ., .D */ + /// IF_SVE_CY_3B CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmphs, EA_SCALABLE, REG_P11, REG_P1, REG_V0, 36, INS_OPTS_SCALABLE_H); /* CMPHS ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FCMGE Presult.S, Pg/Z, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMGE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmge, EA_SCALABLE, REG_P13, REG_P5, REG_V8, REG_V18, INS_OPTS_SCALABLE_D); + /// IF_SVE_HI_3A FCMGE ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmge, EA_SCALABLE, REG_P1, REG_P2, REG_V3, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + /// + /// svbool_t svcmple[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FCMGE Presult.D, Pg/Z, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmge, EA_SCALABLE, REG_P13, REG_P5, REG_V8, REG_V18, INS_OPTS_SCALABLE_D); + /// IF_SVE_HI_3A FCMGE ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmge, EA_SCALABLE, REG_P1, REG_P2, REG_V3, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + + /// CompareNotEqualTo : Compare not equal to + + /// + /// svbool_t svcmpne[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// CMPNE Presult.B, Pg/Z, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P3, REG_P1, REG_V15, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P0, REG_V14, REG_V28, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPNE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P5, REG_V0, -14, INS_OPTS_SCALABLE_H); /* CMPNE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => CompareNotEqualTo(left, right); + + /// + /// svbool_t svcmpne_wide[_s8](svbool_t pg, svint8_t op1, svint64_t op2) + /// CMPNE Presult.B, Pg/Z, Zop1.B, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P3, REG_P1, REG_V15, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P0, REG_V14, REG_V28, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPNE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P5, REG_V0, -14, INS_OPTS_SCALABLE_H); /* CMPNE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => CompareNotEqualTo(left, right); + + /// + /// svbool_t svcmpne[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// CMPNE Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P3, REG_P1, REG_V15, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P0, REG_V14, REG_V28, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPNE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . 
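+ ///
+ /// Sketch (assumed Vector&lt;T&gt; generics): not-equal has a direct encoding, so CMPNE is
+ /// emitted without swapping the operands:
+ ///
+ ///     Vector<short> ne = Sve.CompareNotEqualTo(left, right);   // CMPNE Pd.H, Pg/Z, Zleft.H, Zright.H
+ ///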
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P5, REG_V0, -14, INS_OPTS_SCALABLE_H); /* CMPNE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => CompareNotEqualTo(left, right); + + /// + /// svbool_t svcmpne_wide[_s16](svbool_t pg, svint16_t op1, svint64_t op2) + /// CMPNE Presult.H, Pg/Z, Zop1.H, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P3, REG_P1, REG_V15, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P0, REG_V14, REG_V28, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPNE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P5, REG_V0, -14, INS_OPTS_SCALABLE_H); /* CMPNE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => CompareNotEqualTo(left, right); + + /// + /// svbool_t svcmpne[_s32](svbool_t pg, svint32_t op1, svint32_t op2) + /// CMPNE Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P3, REG_P1, REG_V15, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P0, REG_V14, REG_V28, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPNE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P5, REG_V0, -14, INS_OPTS_SCALABLE_H); /* CMPNE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => CompareNotEqualTo(left, right); + + /// + /// svbool_t svcmpne_wide[_s32](svbool_t pg, svint32_t op1, svint64_t op2) + /// CMPNE Presult.S, Pg/Z, Zop1.S, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P3, REG_P1, REG_V15, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P0, REG_V14, REG_V28, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPNE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P5, REG_V0, -14, INS_OPTS_SCALABLE_H); /* CMPNE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => CompareNotEqualTo(left, right); + + /// + /// svbool_t svcmpne[_s64](svbool_t pg, svint64_t op1, svint64_t op2) + /// CMPNE Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P3, REG_P1, REG_V15, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P0, REG_V14, REG_V28, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPNE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P5, REG_V0, -14, INS_OPTS_SCALABLE_H); /* CMPNE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => CompareNotEqualTo(left, right); + + /// + /// svbool_t svcmpne[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// CMPNE Presult.B, Pg/Z, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P3, REG_P1, REG_V15, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P0, REG_V14, REG_V28, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPNE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P5, REG_V0, -14, INS_OPTS_SCALABLE_H); /* CMPNE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => CompareNotEqualTo(left, right); + + /// + /// svbool_t svcmpne[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// CMPNE Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P3, REG_P1, REG_V15, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P0, REG_V14, REG_V28, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPNE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P5, REG_V0, -14, INS_OPTS_SCALABLE_H); /* CMPNE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => CompareNotEqualTo(left, right); + + /// + /// svbool_t svcmpne[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) + /// CMPNE Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P3, REG_P1, REG_V15, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P0, REG_V14, REG_V28, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPNE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P5, REG_V0, -14, INS_OPTS_SCALABLE_H); /* CMPNE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => CompareNotEqualTo(left, right); + + /// + /// svbool_t svcmpne[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) + /// CMPNE Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CX_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P3, REG_P1, REG_V15, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_CX_4A_A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P0, REG_V14, REG_V28, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); /* CMPNE ., /Z, ., .D */ + /// IF_SVE_CY_3A CMPNE ., /Z, ., . 
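+ ///
+ /// Note (illustrative): equality-class compares are sign-agnostic, so the signed and
+ /// unsigned overloads lower to the same CMPNE encoding:
+ ///
+ ///     Vector<sbyte> s = Sve.CompareNotEqualTo(sa, sb);   // CMPNE Pd.B, Pg/Z, Zsa.B, Zsb.B
+ ///     Vector<byte>  u = Sve.CompareNotEqualTo(ua, ub);   // same instruction, unsigned lanes
+ ///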
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_cmpne, EA_SCALABLE, REG_P0, REG_P5, REG_V0, -14, INS_OPTS_SCALABLE_H); /* CMPNE ., /Z, ., # */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => CompareNotEqualTo(left, right); + + /// + /// svbool_t svcmpne[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FCMNE Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmne, EA_SCALABLE, REG_P11, REG_P1, REG_V21, REG_V10, INS_OPTS_SCALABLE_H); + /// IF_SVE_HI_3A FCMNE ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmne, EA_SCALABLE, REG_P1, REG_P0, REG_V5, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => CompareNotEqualTo(left, right); + + /// + /// svbool_t svcmpne[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FCMNE Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmne, EA_SCALABLE, REG_P11, REG_P1, REG_V21, REG_V10, INS_OPTS_SCALABLE_H); + /// IF_SVE_HI_3A FCMNE ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmne, EA_SCALABLE, REG_P1, REG_P0, REG_V5, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => CompareNotEqualTo(left, right); + + + /// CompareUnordered : Compare unordered with + + /// + /// svbool_t svcmpuo[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FCMUO Presult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMUO ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmuo, EA_SCALABLE, REG_P5, REG_P2, REG_V31, REG_V20, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareUnordered(Vector left, Vector right) => CompareUnordered(left, right); + + /// + /// svbool_t svcmpuo[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FCMUO Presult.D, Pg/Z, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMUO ., /Z, ., . 
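+ ///
+ /// Sketch (assumed Vector&lt;T&gt; generics): CompareUnordered is the NaN test; FCMUO sets a
+ /// lane exactly when either input lane is NaN, complementing the ordered compares above:
+ ///
+ ///     Vector<double> uo = Sve.CompareUnordered(x, y);   // FCMUO Pd.D, Pg/Z, Zx.D, Zy.D
+ ///     // uo[i] is all-ones when x[i] or y[i] is NaN, zero otherwise
+ ///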
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmuo, EA_SCALABLE, REG_P5, REG_P2, REG_V31, REG_V20, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareUnordered(Vector left, Vector right) => CompareUnordered(left, right); + + + /// Compute16BitAddresses : Compute vector addresses for 16-bit data + + /// + /// svuint32_t svadrh[_u32base]_[s32]index(svuint32_t bases, svint32_t indices) + /// ADR Zresult.S, [Zbases.S, Zindices.S, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute16BitAddresses(Vector bases, Vector indices) => Compute16BitAddresses(bases, indices); + + /// + /// svuint32_t svadrh[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices) + /// ADR Zresult.S, [Zbases.S, Zindices.S, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute16BitAddresses(Vector bases, Vector indices) => Compute16BitAddresses(bases, indices); + + /// + /// svuint64_t svadrh[_u64base]_[s64]index(svuint64_t bases, svint64_t indices) + /// ADR Zresult.D, [Zbases.D, Zindices.D, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// 
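+ ///
+ /// Sketch (assumed Vector&lt;T&gt; generics, names illustrative): the ComputeNBitAddresses
+ /// family is a per-lane address computation; ADR scales each index by the data size
+ /// (LSL #1, #2, #3 for 16-, 32- and 64-bit data, while the 8-bit form takes unscaled
+ /// offsets), so each lane yields bases[i] + (indices[i] << shift):
+ ///
+ ///     // bases : Vector<ulong>, indices : Vector<long>
+ ///     Vector<ulong> addrs = Sve.Compute16BitAddresses(bases, indices);   // ADR Zd.D, [Zbases.D, Zidx.D, LSL #1]
+ ///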
theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute16BitAddresses(Vector bases, Vector indices) => Compute16BitAddresses(bases, indices); + + /// + /// svuint64_t svadrh[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices) + /// ADR Zresult.D, [Zbases.D, Zindices.D, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute16BitAddresses(Vector bases, Vector indices) => Compute16BitAddresses(bases, indices); + + + /// Compute32BitAddresses : Compute vector addresses for 32-bit data + + /// + /// svuint32_t svadrw[_u32base]_[s32]index(svuint32_t bases, svint32_t indices) + /// ADR Zresult.S, [Zbases.S, Zindices.S, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute32BitAddresses(Vector bases, Vector indices) => Compute32BitAddresses(bases, indices); + + /// + /// svuint32_t svadrw[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices) + /// ADR Zresult.S, [Zbases.S, Zindices.S, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, 
INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute32BitAddresses(Vector bases, Vector indices) => Compute32BitAddresses(bases, indices); + + /// + /// svuint64_t svadrw[_u64base]_[s64]index(svuint64_t bases, svint64_t indices) + /// ADR Zresult.D, [Zbases.D, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute32BitAddresses(Vector bases, Vector indices) => Compute32BitAddresses(bases, indices); + + /// + /// svuint64_t svadrw[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices) + /// ADR Zresult.D, [Zbases.D, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute32BitAddresses(Vector bases, Vector indices) => Compute32BitAddresses(bases, indices); + + + /// Compute64BitAddresses : Compute vector addresses for 64-bit data + + /// + /// svuint32_t svadrd[_u32base]_[s32]index(svuint32_t bases, svint32_t indices) + /// ADR Zresult.S, [Zbases.S, Zindices.S, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// 
theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute64BitAddresses(Vector bases, Vector indices) => Compute64BitAddresses(bases, indices); + + /// + /// svuint32_t svadrd[_u32base]_[u32]index(svuint32_t bases, svuint32_t indices) + /// ADR Zresult.S, [Zbases.S, Zindices.S, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute64BitAddresses(Vector bases, Vector indices) => Compute64BitAddresses(bases, indices); + + /// + /// svuint64_t svadrd[_u64base]_[s64]index(svuint64_t bases, svint64_t indices) + /// ADR Zresult.D, [Zbases.D, Zindices.D, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute64BitAddresses(Vector bases, Vector indices) => Compute64BitAddresses(bases, indices); + + /// + /// svuint64_t svadrd[_u64base]_[u64]index(svuint64_t bases, svuint64_t indices) + /// ADR Zresult.D, [Zbases.D, Zindices.D, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// 
INS_OPTS_SCALABLE_D_SXTW); + ///
theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute64BitAddresses(Vector bases, Vector indices) => Compute64BitAddresses(bases, indices); + + + /// Compute8BitAddresses : Compute vector addresses for 8-bit data + + /// + /// svuint32_t svadrb[_u32base]_[s32]offset(svuint32_t bases, svint32_t offsets) + /// ADR Zresult.S, [Zbases.S, Zoffsets.S] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute8BitAddresses(Vector bases, Vector indices) => Compute8BitAddresses(bases, indices); + + /// + /// svuint32_t svadrb[_u32base]_[u32]offset(svuint32_t bases, svuint32_t offsets) + /// ADR Zresult.S, [Zbases.S, Zoffsets.S] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute8BitAddresses(Vector bases, Vector indices) => Compute8BitAddresses(bases, indices); + + /// + /// svuint64_t svadrb[_u64base]_[s64]offset(svuint64_t bases, svint64_t offsets) + /// ADR Zresult.D, [Zbases.D, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, 
INS_OPTS_SCALABLE_D_SXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute8BitAddresses(Vector bases, Vector indices) => Compute8BitAddresses(bases, indices); + + /// + /// svuint64_t svadrb[_u64base]_[u64]offset(svuint64_t bases, svuint64_t offsets) + /// ADR Zresult.D, [Zbases.D, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_BH_3A ADR ., [., .{, }] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V4, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V29, REG_V1, REG_V10, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_BH_3B ADR .D, [.D, .D, SXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V7, REG_V9, 0, INS_OPTS_SCALABLE_D_SXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V12, REG_V3, REG_V5, 2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_BH_3B_A ADR .D, [.D, .D, UXTW{}] + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V9, REG_V10, REG_V14, 0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitInsSve_R_R_R_I(INS_sve_adr, EA_SCALABLE, REG_V3, REG_V15, REG_V11, 3, INS_OPTS_SCALABLE_D_UXTW); + /// + public static unsafe Vector Compute8BitAddresses(Vector bases, Vector indices) => Compute8BitAddresses(bases, indices); + + + /// ConditionalExtractAfterLastActiveElement : Conditionally extract element after last + + /// + /// svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data) + /// CLASTA Ztied.B, Pg, Ztied.B, Zdata.B + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.B, Pg, Zresult.B, Zdata.B + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data); + + /// + /// svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data) + /// CLASTA Ztied.B, Pg, Ztied.B, Zdata.B + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.B, Pg, Zresult.B, Zdata.B + /// int8_t svclasta[_n_s8](svbool_t pg, int8_t fallback, svint8_t data) + /// CLASTA Wtied, Pg, Wtied, Zdata.B + /// CLASTA Btied, Pg, Btied, Zdata.B + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . 
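+ ///
+ /// Sketch (assumed Vector&lt;T&gt; generics): ConditionalExtractAfterLastActiveElement selects
+ /// the element of data just after the last active mask lane (wrapping to lane 0) and falls
+ /// back to defaultValue when no lane is active; the MOVPRFX lines above show how the
+ /// non-destructive form is built:
+ ///
+ ///     Vector<sbyte> r = Sve.ConditionalExtractAfterLastActiveElement(mask, defaults, data);
+ ///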
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe sbyte ConditionalExtractAfterLastActiveElement(Vector mask, sbyte defaultValues, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data); + + /// + /// svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data) + /// CLASTA Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data); + + /// + /// svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data) + /// CLASTA Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H + /// int16_t svclasta[_n_s16](svbool_t pg, int16_t fallback, svint16_t data) + /// CLASTA Wtied, Pg, Wtied, Zdata.H + /// CLASTA Htied, Pg, Htied, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe short ConditionalExtractAfterLastActiveElement(Vector mask, short defaultValues, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data); + + /// + /// svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data) + /// CLASTA Ztied.S, Pg, Ztied.S, Zdata.S + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data); + + /// + /// svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data) + /// CLASTA Ztied.S, Pg, Ztied.S, Zdata.S + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S + /// int32_t svclasta[_n_s32](svbool_t pg, int32_t fallback, svint32_t data) + /// CLASTA Wtied, Pg, Wtied, Zdata.S + /// CLASTA Stied, Pg, Stied, Zdata.S + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe int ConditionalExtractAfterLastActiveElement(Vector mask, int defaultValues, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data); + + /// + /// svint64_t svclasta[_s64](svbool_t pg, svint64_t fallback, svint64_t data) + /// CLASTA Ztied.D, Pg, Ztied.D, Zdata.D + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data); + + /// + /// svint64_t svclasta[_s64](svbool_t pg, svint64_t fallback, svint64_t data) + /// CLASTA Ztied.D, Pg, Ztied.D, Zdata.D + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D + /// int64_t svclasta[_n_s64](svbool_t pg, int64_t fallback, svint64_t data) + /// CLASTA Xtied, Pg, Xtied, Zdata.D + /// CLASTA Dtied, Pg, Dtied, Zdata.D + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . 
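+ ///
+ /// Sketch (names illustrative): the scalar overloads take and return a plain element rather
+ /// than a fallback vector, mapping to the general-register and SIMD-scalar CLASTA forms
+ /// listed above:
+ ///
+ ///     long v = Sve.ConditionalExtractAfterLastActiveElement(mask, 0L, data);   // CLASTA Xd, Pg, Xd, Zdata.D
+ ///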
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe long ConditionalExtractAfterLastActiveElement(Vector mask, long defaultValues, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data); + + /// + /// svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data) + /// CLASTA Ztied.B, Pg, Ztied.B, Zdata.B + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.B, Pg, Zresult.B, Zdata.B + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data); + + /// + /// svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data) + /// CLASTA Ztied.B, Pg, Ztied.B, Zdata.B + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.B, Pg, Zresult.B, Zdata.B + /// uint8_t svclasta[_n_u8](svbool_t pg, uint8_t fallback, svuint8_t data) + /// CLASTA Wtied, Pg, Wtied, Zdata.B + /// CLASTA Btied, Pg, Btied, Zdata.B + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe byte ConditionalExtractAfterLastActiveElement(Vector mask, byte defaultValues, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data); + + /// + /// svuint16_t svclasta[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data) + /// CLASTA Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data); + + /// + /// svuint16_t svclasta[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data) + /// CLASTA Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H + /// uint16_t svclasta[_n_u16](svbool_t pg, uint16_t fallback, svuint16_t data) + /// CLASTA Wtied, Pg, Wtied, Zdata.H + /// CLASTA Htied, Pg, Htied, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe ushort ConditionalExtractAfterLastActiveElement(Vector mask, ushort defaultValues, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data); + + /// + /// svuint32_t svclasta[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data) + /// CLASTA Ztied.S, Pg, Ztied.S, Zdata.S + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data); + + /// + /// svuint32_t svclasta[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data) + /// CLASTA Ztied.S, Pg, Ztied.S, Zdata.S + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S + /// uint32_t svclasta[_n_u32](svbool_t pg, uint32_t fallback, svuint32_t data) + /// CLASTA Wtied, Pg, Wtied, Zdata.S + /// CLASTA Stied, Pg, Stied, Zdata.S + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe uint ConditionalExtractAfterLastActiveElement(Vector mask, uint defaultValues, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data); + + /// + /// svuint64_t svclasta[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data) + /// CLASTA Ztied.D, Pg, Ztied.D, Zdata.D + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data); + + /// + /// svuint64_t svclasta[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data) + /// CLASTA Ztied.D, Pg, Ztied.D, Zdata.D + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D + /// uint64_t svclasta[_n_u64](svbool_t pg, uint64_t fallback, svuint64_t data) + /// CLASTA Xtied, Pg, Xtied, Zdata.D + /// CLASTA Dtied, Pg, Dtied, Zdata.D + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe ulong ConditionalExtractAfterLastActiveElement(Vector mask, ulong defaultValues, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data); + + /// + /// svfloat32_t svclasta[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data) + /// CLASTA Ztied.S, Pg, Ztied.S, Zdata.S + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data); + + /// + /// svfloat32_t svclasta[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data) + /// CLASTA Ztied.S, Pg, Ztied.S, Zdata.S + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S + /// float32_t svclasta[_n_f32](svbool_t pg, float32_t fallback, svfloat32_t data) + /// CLASTA Wtied, Pg, Wtied, Zdata.S + /// CLASTA Stied, Pg, Stied, Zdata.S + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe float ConditionalExtractAfterLastActiveElement(Vector mask, float defaultValues, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data); + + /// + /// svfloat64_t svclasta[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data) + /// CLASTA Ztied.D, Pg, Ztied.D, Zdata.D + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data); + + /// + /// svfloat64_t svclasta[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data) + /// CLASTA Ztied.D, Pg, Ztied.D, Zdata.D + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D + /// float64_t svclasta[_n_f64](svbool_t pg, float64_t fallback, svfloat64_t data) + /// CLASTA Xtied, Pg, Xtied, Zdata.D + /// CLASTA Dtied, Pg, Dtied, Zdata.D + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe double ConditionalExtractAfterLastActiveElement(Vector mask, double defaultValues, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data); + + + /// ConditionalExtractAfterLastActiveElementAndReplicate : Conditionally extract element after last + + /// + /// svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data) + /// CLASTA Ztied.B, Pg, Ztied.B, Zdata.B + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.B, Pg, Zresult.B, Zdata.B + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, Vector defaultScalar, Vector data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data); + + /// + /// svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data) + /// CLASTA Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, Vector defaultScalar, Vector data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data); + + /// + /// svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data) + /// CLASTA Ztied.S, Pg, Ztied.S, Zdata.S + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . 
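// Editor's note: a minimal usage sketch for the two method shapes above (vector
// fallback vs. scalar fallback). Illustrative only: it assumes the
// System.Runtime.Intrinsics.Arm.Sve surface documented in this file, and the inputs
// (the pointer src, the -1 fallback) are hypothetical.
using System.Numerics;
using System.Runtime.Intrinsics.Arm;

static class ClastaDemo
{
    public static unsafe int NextAfterActive(int* src)
    {
        if (!Sve.IsSupported) return -1;
        Vector<int> mask = Sve.CreateWhileLessThanMask32Bit(0, 3); // first three lanes active
        Vector<int> data = Sve.LoadVector(mask, src);
        // CLASTA semantics: take the element one past the last active lane
        // (wrapping to lane 0 at the vector end), or the fallback when no lane is active.
        return Sve.ConditionalExtractAfterLastActiveElement(mask, -1, data);
    }
}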
+
+
+ /// ConditionalExtractAfterLastActiveElementAndReplicate : Conditionally extract element after last
+
+ /// <summary>
+ /// svint8_t svclasta[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+ /// CLASTA Ztied.B, Pg, Ztied.B, Zdata.B
+ /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.B, Pg, Zresult.B, Zdata.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTA <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_CN_3A CLASTA <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTA <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<sbyte> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<sbyte> mask, Vector<sbyte> defaultScalar, Vector<sbyte> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+ /// <summary>
+ /// svint16_t svclasta[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+ /// CLASTA Ztied.H, Pg, Ztied.H, Zdata.H
+ /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTA <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_CN_3A CLASTA <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTA <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<short> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<short> mask, Vector<short> defaultScalar, Vector<short> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+ /// <summary>
+ /// svint32_t svclasta[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+ /// CLASTA Ztied.S, Pg, Ztied.S, Zdata.S
+ /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTA <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_CN_3A CLASTA <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTA <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<int> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<int> mask, Vector<int> defaultScalar, Vector<int> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+ /// <summary>
+ /// svint64_t svclasta[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+ /// CLASTA Ztied.D, Pg, Ztied.D, Zdata.D
+ /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTA <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_CN_3A CLASTA <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTA <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<long> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<long> mask, Vector<long> defaultScalar, Vector<long> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+ /// <summary>
+ /// svuint8_t svclasta[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+ /// CLASTA Ztied.B, Pg, Ztied.B, Zdata.B
+ /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.B, Pg, Zresult.B, Zdata.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTA <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_CN_3A CLASTA <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTA <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<byte> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<byte> mask, Vector<byte> defaultScalar, Vector<byte> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+ /// <summary>
+ /// svuint16_t svclasta[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+ /// CLASTA Ztied.H, Pg, Ztied.H, Zdata.H
+ /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTA <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_CN_3A CLASTA <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTA <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<ushort> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<ushort> mask, Vector<ushort> defaultScalar, Vector<ushort> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+ /// <summary>
+ /// svuint32_t svclasta[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+ /// CLASTA Ztied.S, Pg, Ztied.S, Zdata.S
+ /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTA <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_CN_3A CLASTA <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTA <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<uint> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<uint> mask, Vector<uint> defaultScalar, Vector<uint> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+ /// <summary>
+ /// svuint64_t svclasta[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+ /// CLASTA Ztied.D, Pg, Ztied.D, Zdata.D
+ /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTA <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_CN_3A CLASTA <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTA <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<ulong> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<ulong> mask, Vector<ulong> defaultScalar, Vector<ulong> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+ /// <summary>
+ /// svfloat32_t svclasta[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+ /// CLASTA Ztied.S, Pg, Ztied.S, Zdata.S
+ /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.S, Pg, Zresult.S, Zdata.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTA <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_CN_3A CLASTA <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTA <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<float> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<float> mask, Vector<float> defaultScalar, Vector<float> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
+
+ /// <summary>
+ /// svfloat64_t svclasta[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+ /// CLASTA Ztied.D, Pg, Ztied.D, Zdata.D
+ /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.D, Pg, Zresult.D, Zdata.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTA <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_CN_3A CLASTA <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTA <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<double> ConditionalExtractAfterLastActiveElementAndReplicate(Vector<double> mask, Vector<double> defaultScalar, Vector<double> data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
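// Editor's note: illustrative sketch of how the ...AndReplicate group above differs
// from the scalar-returning group: the CLASTA-selected element comes back broadcast
// across the whole vector. Same assumptions as the earlier sketch; the wrapper name
// is hypothetical.
using System.Numerics;
using System.Runtime.Intrinsics.Arm;

static class ClastaReplicateDemo
{
    public static Vector<int> SplatNextAfterActive(Vector<int> mask, Vector<int> defaultScalar, Vector<int> data)
    {
        // Every lane of the result holds the extracted element; when no lane of the
        // mask is active, the result is taken from defaultScalar instead.
        return Sve.ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data);
    }
}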
+
+
+ /// ConditionalExtractLastActiveElement : Conditionally extract last element
+
+ /// <summary>
+ /// svint8_t svclastb[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+ /// CLASTB Ztied.B, Pg, Ztied.B, Zdata.B
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.B, Pg, Zresult.B, Zdata.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<sbyte> ConditionalExtractLastActiveElement(Vector<sbyte> mask, Vector<sbyte> defaultValue, Vector<sbyte> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+ /// <summary>
+ /// svint8_t svclastb[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+ /// CLASTB Ztied.B, Pg, Ztied.B, Zdata.B
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.B, Pg, Zresult.B, Zdata.B
+ /// int8_t svclastb[_n_s8](svbool_t pg, int8_t fallback, svint8_t data)
+ /// CLASTB Wtied, Pg, Wtied, Zdata.B
+ /// CLASTB Btied, Pg, Btied, Zdata.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe sbyte ConditionalExtractLastActiveElement(Vector<sbyte> mask, sbyte defaultValues, Vector<sbyte> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+ /// <summary>
+ /// svint16_t svclastb[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+ /// CLASTB Ztied.H, Pg, Ztied.H, Zdata.H
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<short> ConditionalExtractLastActiveElement(Vector<short> mask, Vector<short> defaultValue, Vector<short> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+ /// <summary>
+ /// svint16_t svclastb[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+ /// CLASTB Ztied.H, Pg, Ztied.H, Zdata.H
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H
+ /// int16_t svclastb[_n_s16](svbool_t pg, int16_t fallback, svint16_t data)
+ /// CLASTB Wtied, Pg, Wtied, Zdata.H
+ /// CLASTB Htied, Pg, Htied, Zdata.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe short ConditionalExtractLastActiveElement(Vector<short> mask, short defaultValues, Vector<short> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+ /// <summary>
+ /// svint32_t svclastb[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+ /// CLASTB Ztied.S, Pg, Ztied.S, Zdata.S
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> ConditionalExtractLastActiveElement(Vector<int> mask, Vector<int> defaultValue, Vector<int> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+ /// <summary>
+ /// svint32_t svclastb[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+ /// CLASTB Ztied.S, Pg, Ztied.S, Zdata.S
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S
+ /// int32_t svclastb[_n_s32](svbool_t pg, int32_t fallback, svint32_t data)
+ /// CLASTB Wtied, Pg, Wtied, Zdata.S
+ /// CLASTB Stied, Pg, Stied, Zdata.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe int ConditionalExtractLastActiveElement(Vector<int> mask, int defaultValues, Vector<int> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+ /// <summary>
+ /// svint64_t svclastb[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+ /// CLASTB Ztied.D, Pg, Ztied.D, Zdata.D
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> ConditionalExtractLastActiveElement(Vector<long> mask, Vector<long> defaultValue, Vector<long> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+ /// <summary>
+ /// svint64_t svclastb[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+ /// CLASTB Ztied.D, Pg, Ztied.D, Zdata.D
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D
+ /// int64_t svclastb[_n_s64](svbool_t pg, int64_t fallback, svint64_t data)
+ /// CLASTB Xtied, Pg, Xtied, Zdata.D
+ /// CLASTB Dtied, Pg, Dtied, Zdata.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe long ConditionalExtractLastActiveElement(Vector<long> mask, long defaultValues, Vector<long> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+ /// <summary>
+ /// svuint8_t svclastb[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+ /// CLASTB Ztied.B, Pg, Ztied.B, Zdata.B
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.B, Pg, Zresult.B, Zdata.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<byte> ConditionalExtractLastActiveElement(Vector<byte> mask, Vector<byte> defaultValue, Vector<byte> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+ /// <summary>
+ /// svuint8_t svclastb[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+ /// CLASTB Ztied.B, Pg, Ztied.B, Zdata.B
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.B, Pg, Zresult.B, Zdata.B
+ /// uint8_t svclastb[_n_u8](svbool_t pg, uint8_t fallback, svuint8_t data)
+ /// CLASTB Wtied, Pg, Wtied, Zdata.B
+ /// CLASTB Btied, Pg, Btied, Zdata.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe byte ConditionalExtractLastActiveElement(Vector<byte> mask, byte defaultValues, Vector<byte> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+ /// <summary>
+ /// svuint16_t svclastb[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+ /// CLASTB Ztied.H, Pg, Ztied.H, Zdata.H
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ushort> ConditionalExtractLastActiveElement(Vector<ushort> mask, Vector<ushort> defaultValue, Vector<ushort> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+ /// <summary>
+ /// svuint16_t svclastb[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+ /// CLASTB Ztied.H, Pg, Ztied.H, Zdata.H
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H
+ /// uint16_t svclastb[_n_u16](svbool_t pg, uint16_t fallback, svuint16_t data)
+ /// CLASTB Wtied, Pg, Wtied, Zdata.H
+ /// CLASTB Htied, Pg, Htied, Zdata.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe ushort ConditionalExtractLastActiveElement(Vector<ushort> mask, ushort defaultValues, Vector<ushort> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+ /// <summary>
+ /// svuint32_t svclastb[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+ /// CLASTB Ztied.S, Pg, Ztied.S, Zdata.S
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<uint> ConditionalExtractLastActiveElement(Vector<uint> mask, Vector<uint> defaultValue, Vector<uint> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+ /// <summary>
+ /// svuint32_t svclastb[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+ /// CLASTB Ztied.S, Pg, Ztied.S, Zdata.S
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S
+ /// uint32_t svclastb[_n_u32](svbool_t pg, uint32_t fallback, svuint32_t data)
+ /// CLASTB Wtied, Pg, Wtied, Zdata.S
+ /// CLASTB Stied, Pg, Stied, Zdata.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe uint ConditionalExtractLastActiveElement(Vector<uint> mask, uint defaultValues, Vector<uint> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+ /// <summary>
+ /// svuint64_t svclastb[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+ /// CLASTB Ztied.D, Pg, Ztied.D, Zdata.D
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> ConditionalExtractLastActiveElement(Vector<ulong> mask, Vector<ulong> defaultValue, Vector<ulong> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+ /// <summary>
+ /// svuint64_t svclastb[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+ /// CLASTB Ztied.D, Pg, Ztied.D, Zdata.D
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D
+ /// uint64_t svclastb[_n_u64](svbool_t pg, uint64_t fallback, svuint64_t data)
+ /// CLASTB Xtied, Pg, Xtied, Zdata.D
+ /// CLASTB Dtied, Pg, Dtied, Zdata.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe ulong ConditionalExtractLastActiveElement(Vector<ulong> mask, ulong defaultValues, Vector<ulong> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+ /// <summary>
+ /// svfloat32_t svclastb[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+ /// CLASTB Ztied.S, Pg, Ztied.S, Zdata.S
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<float> ConditionalExtractLastActiveElement(Vector<float> mask, Vector<float> defaultValue, Vector<float> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+ /// <summary>
+ /// svfloat32_t svclastb[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+ /// CLASTB Ztied.S, Pg, Ztied.S, Zdata.S
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S
+ /// float32_t svclastb[_n_f32](svbool_t pg, float32_t fallback, svfloat32_t data)
+ /// CLASTB Wtied, Pg, Wtied, Zdata.S
+ /// CLASTB Stied, Pg, Stied, Zdata.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe float ConditionalExtractLastActiveElement(Vector<float> mask, float defaultValues, Vector<float> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
+
+ /// <summary>
+ /// svfloat64_t svclastb[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+ /// CLASTB Ztied.D, Pg, Ztied.D, Zdata.D
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<double> ConditionalExtractLastActiveElement(Vector<double> mask, Vector<double> defaultValue, Vector<double> data) => ConditionalExtractLastActiveElement(mask, defaultValue, data);
+
+ /// <summary>
+ /// svfloat64_t svclastb[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+ /// CLASTB Ztied.D, Pg, Ztied.D, Zdata.D
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D
+ /// float64_t svclastb[_n_f64](svbool_t pg, float64_t fallback, svfloat64_t data)
+ /// CLASTB Xtied, Pg, Xtied, Zdata.D
+ /// CLASTB Dtied, Pg, Dtied, Zdata.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe double ConditionalExtractLastActiveElement(Vector<double> mask, double defaultValues, Vector<double> data) => ConditionalExtractLastActiveElement(mask, defaultValues, data);
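// Editor's note: illustrative contrast with the CLASTA group earlier in the file:
// CLASTB extracts the last active element itself rather than the element after it.
// Same assumptions as the earlier sketches; the inputs are hypothetical.
using System.Numerics;
using System.Runtime.Intrinsics.Arm;

static class ClastbDemo
{
    public static unsafe int LastActive(int* src)
    {
        if (!Sve.IsSupported) return -1;
        Vector<int> mask = Sve.CreateWhileLessThanMask32Bit(0, 3); // lanes 0..2 active
        Vector<int> data = Sve.LoadVector(mask, src);
        // Returns the value in the last active lane (lane 2 here),
        // or the -1 fallback when no lane is active.
        return Sve.ConditionalExtractLastActiveElement(mask, -1, data);
    }
}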
+
+
+ /// ConditionalExtractLastActiveElementAndReplicate : Conditionally extract last element
+
+ /// <summary>
+ /// svint8_t svclastb[_s8](svbool_t pg, svint8_t fallback, svint8_t data)
+ /// CLASTB Ztied.B, Pg, Ztied.B, Zdata.B
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.B, Pg, Zresult.B, Zdata.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<sbyte> ConditionalExtractLastActiveElementAndReplicate(Vector<sbyte> mask, Vector<sbyte> fallback, Vector<sbyte> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+ /// <summary>
+ /// svint16_t svclastb[_s16](svbool_t pg, svint16_t fallback, svint16_t data)
+ /// CLASTB Ztied.H, Pg, Ztied.H, Zdata.H
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<short> ConditionalExtractLastActiveElementAndReplicate(Vector<short> mask, Vector<short> fallback, Vector<short> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+ /// <summary>
+ /// svint32_t svclastb[_s32](svbool_t pg, svint32_t fallback, svint32_t data)
+ /// CLASTB Ztied.S, Pg, Ztied.S, Zdata.S
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> ConditionalExtractLastActiveElementAndReplicate(Vector<int> mask, Vector<int> fallback, Vector<int> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+ /// <summary>
+ /// svint64_t svclastb[_s64](svbool_t pg, svint64_t fallback, svint64_t data)
+ /// CLASTB Ztied.D, Pg, Ztied.D, Zdata.D
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> ConditionalExtractLastActiveElementAndReplicate(Vector<long> mask, Vector<long> fallback, Vector<long> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+ /// <summary>
+ /// svuint8_t svclastb[_u8](svbool_t pg, svuint8_t fallback, svuint8_t data)
+ /// CLASTB Ztied.B, Pg, Ztied.B, Zdata.B
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.B, Pg, Zresult.B, Zdata.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<byte> ConditionalExtractLastActiveElementAndReplicate(Vector<byte> mask, Vector<byte> fallback, Vector<byte> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+ /// <summary>
+ /// svuint16_t svclastb[_u16](svbool_t pg, svuint16_t fallback, svuint16_t data)
+ /// CLASTB Ztied.H, Pg, Ztied.H, Zdata.H
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ushort> ConditionalExtractLastActiveElementAndReplicate(Vector<ushort> mask, Vector<ushort> fallback, Vector<ushort> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+ /// <summary>
+ /// svuint32_t svclastb[_u32](svbool_t pg, svuint32_t fallback, svuint32_t data)
+ /// CLASTB Ztied.S, Pg, Ztied.S, Zdata.S
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<uint> ConditionalExtractLastActiveElementAndReplicate(Vector<uint> mask, Vector<uint> fallback, Vector<uint> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+ /// <summary>
+ /// svuint64_t svclastb[_u64](svbool_t pg, svuint64_t fallback, svuint64_t data)
+ /// CLASTB Ztied.D, Pg, Ztied.D, Zdata.D
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> ConditionalExtractLastActiveElementAndReplicate(Vector<ulong> mask, Vector<ulong> fallback, Vector<ulong> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+ /// <summary>
+ /// svfloat32_t svclastb[_f32](svbool_t pg, svfloat32_t fallback, svfloat32_t data)
+ /// CLASTB Ztied.S, Pg, Ztied.S, Zdata.S
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.S, Pg, Zresult.S, Zdata.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<float> ConditionalExtractLastActiveElementAndReplicate(Vector<float> mask, Vector<float> fallback, Vector<float> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
+
+ /// <summary>
+ /// svfloat64_t svclastb[_f64](svbool_t pg, svfloat64_t fallback, svfloat64_t data)
+ /// CLASTB Ztied.D, Pg, Ztied.D, Zdata.D
+ /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.D, Pg, Zresult.D, Zdata.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CM_3A CLASTB <Zdn>.<T>, <Pg>, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CN_3A CLASTB <V><dn>, <Pg>, <V><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CO_3A CLASTB <R><dn>, <Pg>, <R><dn>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<double> ConditionalExtractLastActiveElementAndReplicate(Vector<double> mask, Vector<double> fallback, Vector<double> data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
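// Editor's note: illustrative sketch for the ...AndReplicate group above; as with the
// CLASTA variant, the last active element (or a value from the fallback vector when no
// lane is active) is broadcast to every lane. Same assumptions as the earlier sketches.
using System.Numerics;
using System.Runtime.Intrinsics.Arm;

static class ClastbReplicateDemo
{
    public static Vector<float> SplatLastActive(Vector<float> mask, Vector<float> fallback, Vector<float> data)
        => Sve.ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data);
}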
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_P4, REG_P6, REG_P13, REG_P10, INS_OPTS_SCALABLE_B); /* SEL .B, , .B, .B */ + /// IF_SVE_CW_4A SEL ., , ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V29, REG_P15, REG_V28, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V5, REG_P13, REG_V27, REG_V5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right) => ConditionalSelect(mask, left, right); + + /// + /// svuint8_t svsel[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// SEL Zresult.B, Pg, Zop1.B, Zop2.B + /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2) + /// SEL Presult.B, Pg, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_P4, REG_P6, REG_P13, REG_P10, INS_OPTS_SCALABLE_B); /* SEL .B, , .B, .B */ + /// IF_SVE_CW_4A SEL ., , ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V29, REG_P15, REG_V28, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V5, REG_P13, REG_V27, REG_V5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right) => ConditionalSelect(mask, left, right); + + /// + /// svuint16_t svsel[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// SEL Zresult.H, Pg, Zop1.H, Zop2.H + /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2) + /// SEL Presult.B, Pg, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_P4, REG_P6, REG_P13, REG_P10, INS_OPTS_SCALABLE_B); /* SEL .B, , .B, .B */ + /// IF_SVE_CW_4A SEL ., , ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V29, REG_P15, REG_V28, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V5, REG_P13, REG_V27, REG_V5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right) => ConditionalSelect(mask, left, right); + + /// + /// svuint32_t svsel[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) + /// SEL Zresult.S, Pg, Zop1.S, Zop2.S + /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2) + /// SEL Presult.B, Pg, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_P4, REG_P6, REG_P13, REG_P10, INS_OPTS_SCALABLE_B); /* SEL .B, , .B, .B */ + /// IF_SVE_CW_4A SEL ., , ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V29, REG_P15, REG_V28, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V5, REG_P13, REG_V27, REG_V5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right) => ConditionalSelect(mask, left, right); + + /// + /// svuint64_t svsel[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) + /// SEL Zresult.D, Pg, Zop1.D, Zop2.D + /// svbool_t svsel[_b](svbool_t pg, svbool_t op1, svbool_t op2) + /// SEL Presult.B, Pg, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_P4, REG_P6, REG_P13, REG_P10, INS_OPTS_SCALABLE_B); /* SEL .B, , .B, .B */ + /// IF_SVE_CW_4A SEL ., , ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V29, REG_P15, REG_V28, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V5, REG_P13, REG_V27, REG_V5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right) => ConditionalSelect(mask, left, right); + + /// + /// svfloat32_t svsel[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// SEL Zresult.S, Pg, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_P4, REG_P6, REG_P13, REG_P10, INS_OPTS_SCALABLE_B); /* SEL .B, , .B, .B */ + /// IF_SVE_CW_4A SEL ., , ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V29, REG_P15, REG_V28, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V5, REG_P13, REG_V27, REG_V5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right) => ConditionalSelect(mask, left, right); + + /// + /// svfloat64_t svsel[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// SEL Zresult.D, Pg, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_P4, REG_P6, REG_P13, REG_P10, INS_OPTS_SCALABLE_B); /* SEL .B, , .B, .B */ + /// IF_SVE_CW_4A SEL ., , ., . 
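+
+ // Usage sketch (editorial addition, hypothetical helper; assumes Sve.IsSupported):
+ // per-lane blend: lanes where 'mask' is active come from 'left', the rest from 'right'.
+ // static Vector<int> Blend(Vector<int> mask, Vector<int> left, Vector<int> right)
+ //     => Sve.ConditionalSelect(mask, left, right);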
+
+
+ /// ConvertToDouble : Floating-point convert
+
+ /// <summary>
+ /// svfloat64_t svcvt_f64[_s32]_m(svfloat64_t inactive, svbool_t pg, svint32_t op)
+ /// SCVTF Ztied.D, Pg/M, Zop.S
+ /// MOVPRFX Zresult, Zinactive; SCVTF Zresult.D, Pg/M, Zop.S
+ /// svfloat64_t svcvt_f64[_s32]_x(svbool_t pg, svint32_t op)
+ /// SCVTF Ztied.D, Pg/M, Ztied.S
+ /// MOVPRFX Zresult, Zop; SCVTF Zresult.D, Pg/M, Zop.S
+ /// svfloat64_t svcvt_f64[_s32]_z(svbool_t pg, svint32_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; SCVTF Zresult.D, Pg/M, Zop.S
+ ///
+ /// codegenarm64test:
+ /// sve_scvtf - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<double> ConvertToDouble(Vector<int> value) => ConvertToDouble(value);
+
+ /// <summary>
+ /// svfloat64_t svcvt_f64[_s64]_m(svfloat64_t inactive, svbool_t pg, svint64_t op)
+ /// SCVTF Ztied.D, Pg/M, Zop.D
+ /// MOVPRFX Zresult, Zinactive; SCVTF Zresult.D, Pg/M, Zop.D
+ /// svfloat64_t svcvt_f64[_s64]_x(svbool_t pg, svint64_t op)
+ /// SCVTF Ztied.D, Pg/M, Ztied.D
+ /// MOVPRFX Zresult, Zop; SCVTF Zresult.D, Pg/M, Zop.D
+ /// svfloat64_t svcvt_f64[_s64]_z(svbool_t pg, svint64_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; SCVTF Zresult.D, Pg/M, Zop.D
+ ///
+ /// codegenarm64test:
+ /// sve_scvtf - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<double> ConvertToDouble(Vector<long> value) => ConvertToDouble(value);
+
+ /// <summary>
+ /// svfloat64_t svcvt_f64[_u32]_m(svfloat64_t inactive, svbool_t pg, svuint32_t op)
+ /// UCVTF Ztied.D, Pg/M, Zop.S
+ /// MOVPRFX Zresult, Zinactive; UCVTF Zresult.D, Pg/M, Zop.S
+ /// svfloat64_t svcvt_f64[_u32]_x(svbool_t pg, svuint32_t op)
+ /// UCVTF Ztied.D, Pg/M, Ztied.S
+ /// MOVPRFX Zresult, Zop; UCVTF Zresult.D, Pg/M, Zop.S
+ /// svfloat64_t svcvt_f64[_u32]_z(svbool_t pg, svuint32_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; UCVTF Zresult.D, Pg/M, Zop.S
+ ///
+ /// codegenarm64test:
+ /// sve_ucvtf - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<double> ConvertToDouble(Vector<uint> value) => ConvertToDouble(value);
+
+ /// <summary>
+ /// svfloat64_t svcvt_f64[_u64]_m(svfloat64_t inactive, svbool_t pg, svuint64_t op)
+ /// UCVTF Ztied.D, Pg/M, Zop.D
+ /// MOVPRFX Zresult, Zinactive; UCVTF Zresult.D, Pg/M, Zop.D
+ /// svfloat64_t svcvt_f64[_u64]_x(svbool_t pg, svuint64_t op)
+ /// UCVTF Ztied.D, Pg/M, Ztied.D
+ /// MOVPRFX Zresult, Zop; UCVTF Zresult.D, Pg/M, Zop.D
+ /// svfloat64_t svcvt_f64[_u64]_z(svbool_t pg, svuint64_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; UCVTF Zresult.D, Pg/M, Zop.D
+ ///
+ /// codegenarm64test:
+ /// sve_ucvtf - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<double> ConvertToDouble(Vector<ulong> value) => ConvertToDouble(value);
+
+ /// <summary>
+ /// svfloat64_t svcvt_f64[_f32]_m(svfloat64_t inactive, svbool_t pg, svfloat32_t op)
+ /// FCVT Ztied.D, Pg/M, Zop.S
+ /// MOVPRFX Zresult, Zinactive; FCVT Zresult.D, Pg/M, Zop.S
+ /// svfloat64_t svcvt_f64[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// FCVT Ztied.D, Pg/M, Ztied.S
+ /// MOVPRFX Zresult, Zop; FCVT Zresult.D, Pg/M, Zop.S
+ /// svfloat64_t svcvt_f64[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVT Zresult.D, Pg/M, Zop.S
+ ///
+ /// codegenarm64test:
+ /// sve_fcvt - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<double> ConvertToDouble(Vector<float> value) => ConvertToDouble(value);
+
+
+ /// ConvertToInt32 : Floating-point convert
+
+ /// <summary>
+ /// svint32_t svcvt_s32[_f32]_m(svint32_t inactive, svbool_t pg, svfloat32_t op)
+ /// FCVTZS Ztied.S, Pg/M, Zop.S
+ /// MOVPRFX Zresult, Zinactive; FCVTZS Zresult.S, Pg/M, Zop.S
+ /// svint32_t svcvt_s32[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// FCVTZS Ztied.S, Pg/M, Ztied.S
+ /// MOVPRFX Zresult, Zop; FCVTZS Zresult.S, Pg/M, Zop.S
+ /// svint32_t svcvt_s32[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FCVTZS Zresult.S, Pg/M, Zop.S
+ ///
+ /// codegenarm64test:
+ /// sve_fcvtzs - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<int> ConvertToInt32(Vector<float> value) => ConvertToInt32(value);
+
+ /// <summary>
+ /// svint32_t svcvt_s32[_f64]_m(svint32_t inactive, svbool_t pg, svfloat64_t op)
+ /// FCVTZS Ztied.S, Pg/M, Zop.D
+ /// MOVPRFX Zresult, Zinactive; FCVTZS Zresult.S, Pg/M, Zop.D
+ /// svint32_t svcvt_s32[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// FCVTZS Ztied.S, Pg/M, Ztied.D
+ /// MOVPRFX Zresult, Zop; FCVTZS Zresult.S, Pg/M, Zop.D
+ /// svint32_t svcvt_s32[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZS Zresult.S, Pg/M, Zop.D
+ ///
+ /// codegenarm64test:
+ /// sve_fcvtzs - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<int> ConvertToInt32(Vector<double> value) => ConvertToInt32(value);
+
+
+ /// ConvertToInt64 : Floating-point convert
+
+ /// <summary>
+ /// svint64_t svcvt_s64[_f32]_m(svint64_t inactive, svbool_t pg, svfloat32_t op)
+ /// FCVTZS Ztied.D, Pg/M, Zop.S
+ /// MOVPRFX Zresult, Zinactive; FCVTZS Zresult.D, Pg/M, Zop.S
+ /// svint64_t svcvt_s64[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// FCVTZS Ztied.D, Pg/M, Ztied.S
+ /// MOVPRFX Zresult, Zop; FCVTZS Zresult.D, Pg/M, Zop.S
+ /// svint64_t svcvt_s64[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZS Zresult.D, Pg/M, Zop.S
+ ///
+ /// codegenarm64test:
+ /// sve_fcvtzs - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<long> ConvertToInt64(Vector<float> value) => ConvertToInt64(value);
+
+ /// <summary>
+ /// svint64_t svcvt_s64[_f64]_m(svint64_t inactive, svbool_t pg, svfloat64_t op)
+ /// FCVTZS Ztied.D, Pg/M, Zop.D
+ /// MOVPRFX Zresult, Zinactive; FCVTZS Zresult.D, Pg/M, Zop.D
+ /// svint64_t svcvt_s64[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// FCVTZS Ztied.D, Pg/M, Ztied.D
+ /// MOVPRFX Zresult, Zop; FCVTZS Zresult.D, Pg/M, Zop.D
+ /// svint64_t svcvt_s64[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZS Zresult.D, Pg/M, Zop.D
+ ///
+ /// codegenarm64test:
+ /// sve_fcvtzs - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<long> ConvertToInt64(Vector<double> value) => ConvertToInt64(value);
+
+
+ /// ConvertToSingle : Floating-point convert
+
+ /// <summary>
+ /// svfloat32_t svcvt_f32[_s32]_m(svfloat32_t inactive, svbool_t pg, svint32_t op)
+ /// SCVTF Ztied.S, Pg/M, Zop.S
+ /// MOVPRFX Zresult, Zinactive; SCVTF Zresult.S, Pg/M, Zop.S
+ /// svfloat32_t svcvt_f32[_s32]_x(svbool_t pg, svint32_t op)
+ /// SCVTF Ztied.S, Pg/M, Ztied.S
+ /// MOVPRFX Zresult, Zop; SCVTF Zresult.S, Pg/M, Zop.S
+ /// svfloat32_t svcvt_f32[_s32]_z(svbool_t pg, svint32_t op)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop.S; SCVTF Zresult.S, Pg/M, Zop.S
+ ///
+ /// codegenarm64test:
+ /// sve_scvtf - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<float> ConvertToSingle(Vector<int> value) => ConvertToSingle(value);
+
+ /// <summary>
+ /// svfloat32_t svcvt_f32[_s64]_m(svfloat32_t inactive, svbool_t pg, svint64_t op)
+ /// SCVTF Ztied.S, Pg/M, Zop.D
+ /// MOVPRFX Zresult, Zinactive; SCVTF Zresult.S, Pg/M, Zop.D
+ /// svfloat32_t svcvt_f32[_s64]_x(svbool_t pg, svint64_t op)
+ /// SCVTF Ztied.S, Pg/M, Ztied.D
+ /// MOVPRFX Zresult, Zop; SCVTF Zresult.S, Pg/M, Zop.D
+ /// svfloat32_t svcvt_f32[_s64]_z(svbool_t pg, svint64_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; SCVTF Zresult.S, Pg/M, Zop.D
+ ///
+ /// codegenarm64test:
+ /// sve_scvtf - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<float> ConvertToSingle(Vector<long> value) => ConvertToSingle(value);
+
+ /// <summary>
+ /// svfloat32_t svcvt_f32[_u32]_m(svfloat32_t inactive, svbool_t pg, svuint32_t op)
+ /// UCVTF Ztied.S, Pg/M, Zop.S
+ /// MOVPRFX Zresult, Zinactive; UCVTF Zresult.S, Pg/M, Zop.S
+ /// svfloat32_t svcvt_f32[_u32]_x(svbool_t pg, svuint32_t op)
+ /// UCVTF Ztied.S, Pg/M, Ztied.S
+ /// MOVPRFX Zresult, Zop; UCVTF Zresult.S, Pg/M, Zop.S
+ /// svfloat32_t svcvt_f32[_u32]_z(svbool_t pg, svuint32_t op)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop.S; UCVTF Zresult.S, Pg/M, Zop.S
+ ///
+ /// codegenarm64test:
+ /// sve_ucvtf - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<float> ConvertToSingle(Vector<uint> value) => ConvertToSingle(value);
+
+ /// <summary>
+ /// svfloat32_t svcvt_f32[_u64]_m(svfloat32_t inactive, svbool_t pg, svuint64_t op)
+ /// UCVTF Ztied.S, Pg/M, Zop.D
+ /// MOVPRFX Zresult, Zinactive; UCVTF Zresult.S, Pg/M, Zop.D
+ /// svfloat32_t svcvt_f32[_u64]_x(svbool_t pg, svuint64_t op)
+ /// UCVTF Ztied.S, Pg/M, Ztied.D
+ /// MOVPRFX Zresult, Zop; UCVTF Zresult.S, Pg/M, Zop.D
+ /// svfloat32_t svcvt_f32[_u64]_z(svbool_t pg, svuint64_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; UCVTF Zresult.S, Pg/M, Zop.D
+ ///
+ /// codegenarm64test:
+ /// sve_ucvtf - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<float> ConvertToSingle(Vector<ulong> value) => ConvertToSingle(value);
+
+ /// <summary>
+ /// svfloat32_t svcvt_f32[_f64]_m(svfloat32_t inactive, svbool_t pg, svfloat64_t op)
+ /// FCVT Ztied.S, Pg/M, Zop.D
+ /// MOVPRFX Zresult, Zinactive; FCVT Zresult.S, Pg/M, Zop.D
+ /// svfloat32_t svcvt_f32[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// FCVT Ztied.S, Pg/M, Ztied.D
+ /// MOVPRFX Zresult, Zop; FCVT Zresult.S, Pg/M, Zop.D
+ /// svfloat32_t svcvt_f32[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVT Zresult.S, Pg/M, Zop.D
+ ///
+ /// codegenarm64test:
+ /// sve_fcvt - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<float> ConvertToSingle(Vector<double> value) => ConvertToSingle(value);
+
+
+ /// ConvertToUInt32 : Floating-point convert
+
+ /// <summary>
+ /// svuint32_t svcvt_u32[_f32]_m(svuint32_t inactive, svbool_t pg, svfloat32_t op)
+ /// FCVTZU Ztied.S, Pg/M, Zop.S
+ /// MOVPRFX Zresult, Zinactive; FCVTZU Zresult.S, Pg/M, Zop.S
+ /// svuint32_t svcvt_u32[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// FCVTZU Ztied.S, Pg/M, Ztied.S
+ /// MOVPRFX Zresult, Zop; FCVTZU Zresult.S, Pg/M, Zop.S
+ /// svuint32_t svcvt_u32[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FCVTZU Zresult.S, Pg/M, Zop.S
+ ///
+ /// codegenarm64test:
+ /// sve_fcvtzu - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<uint> ConvertToUInt32(Vector<float> value) => ConvertToUInt32(value);
+
+ /// <summary>
+ /// svuint32_t svcvt_u32[_f64]_m(svuint32_t inactive, svbool_t pg, svfloat64_t op)
+ /// FCVTZU Ztied.S, Pg/M, Zop.D
+ /// MOVPRFX Zresult, Zinactive; FCVTZU Zresult.S, Pg/M, Zop.D
+ /// svuint32_t svcvt_u32[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// FCVTZU Ztied.S, Pg/M, Ztied.D
+ /// MOVPRFX Zresult, Zop; FCVTZU Zresult.S, Pg/M, Zop.D
+ /// svuint32_t svcvt_u32[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZU Zresult.S, Pg/M, Zop.D
+ ///
+ /// codegenarm64test:
+ /// sve_fcvtzu - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<uint> ConvertToUInt32(Vector<double> value) => ConvertToUInt32(value);
+
+
+ /// ConvertToUInt64 : Floating-point convert
+
+ /// <summary>
+ /// svuint64_t svcvt_u64[_f32]_m(svuint64_t inactive, svbool_t pg, svfloat32_t op)
+ /// FCVTZU Ztied.D, Pg/M, Zop.S
+ /// MOVPRFX Zresult, Zinactive; FCVTZU Zresult.D, Pg/M, Zop.S
+ /// svuint64_t svcvt_u64[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// FCVTZU Ztied.D, Pg/M, Ztied.S
+ /// MOVPRFX Zresult, Zop; FCVTZU Zresult.D, Pg/M, Zop.S
+ /// svuint64_t svcvt_u64[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZU Zresult.D, Pg/M, Zop.S
+ ///
+ /// codegenarm64test:
+ /// sve_fcvtzu - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ulong> ConvertToUInt64(Vector<float> value) => ConvertToUInt64(value);
+
+ /// <summary>
+ /// svuint64_t svcvt_u64[_f64]_m(svuint64_t inactive, svbool_t pg, svfloat64_t op)
+ /// FCVTZU Ztied.D, Pg/M, Zop.D
+ /// MOVPRFX Zresult, Zinactive; FCVTZU Zresult.D, Pg/M, Zop.D
+ /// svuint64_t svcvt_u64[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// FCVTZU Ztied.D, Pg/M, Ztied.D
+ /// MOVPRFX Zresult, Zop; FCVTZU Zresult.D, Pg/M, Zop.D
+ /// svuint64_t svcvt_u64[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZU Zresult.D, Pg/M, Zop.D
+ ///
+ /// codegenarm64test:
+ /// sve_fcvtzu - not implemented in coreclr
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ulong> ConvertToUInt64(Vector<double> value) => ConvertToUInt64(value);
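+
+ // Usage sketch (editorial addition, hypothetical helper; assumes Sve.IsSupported):
+ // FCVTZS/FCVTZU truncate toward zero, so a double -> long -> double round trip drops
+ // any fractional part (e.g. 2.7 becomes 2.0).
+ // static Vector<double> TruncateToWhole(Vector<double> v)
+ //     => Sve.ConvertToDouble(Sve.ConvertToInt64(v));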
+
+
+ /// Count16BitElements : Count the number of 16-bit elements in a vector
+
+ /// <summary>
+ /// uint64_t svcnth_pat(enum svpattern pattern)
+ /// CNTH Xresult, pattern
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BL_1A CNTH <Xd>{, <pattern>{, MUL #<imm>}}
+ /// theEmitter->emitIns_R_PATTERN_I(INS_sve_cnth, EA_8BYTE, REG_R12, SVE_PATTERN_VL7, 5);
+ /// theEmitter->emitIns_R_PATTERN_I(INS_sve_cnth, EA_8BYTE, REG_R5, SVE_PATTERN_ALL, 13);
+ /// </summary>
+ public static unsafe ulong Count16BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => Count16BitElements(pattern);
+
+
+ /// Count32BitElements : Count the number of 32-bit elements in a vector
+
+ /// <summary>
+ /// uint64_t svcntw_pat(enum svpattern pattern)
+ /// CNTW Xresult, pattern
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BL_1A CNTW <Xd>{, <pattern>{, MUL #<imm>}}
+ /// theEmitter->emitIns_R_PATTERN_I(INS_sve_cntw, EA_8BYTE, REG_R23, SVE_PATTERN_VL256, 7);
+ /// </summary>
+ public static unsafe ulong Count32BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => Count32BitElements(pattern);
+
+
+ /// Count64BitElements : Count the number of 64-bit elements in a vector
+
+ /// <summary>
+ /// uint64_t svcntd_pat(enum svpattern pattern)
+ /// CNTD Xresult, pattern
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BL_1A CNTD <Xd>{, <pattern>{, MUL #<imm>}}
+ /// theEmitter->emitIns_R_PATTERN_I(INS_sve_cntd, EA_8BYTE, REG_R30, SVE_PATTERN_VL1, 16);
+ /// theEmitter->emitIns_R_PATTERN_I(INS_sve_cntd, EA_8BYTE, REG_R15, SVE_PATTERN_MUL3, 10);
+ /// </summary>
+ public static unsafe ulong Count64BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => Count64BitElements(pattern);
+
+
+ /// Count8BitElements : Count the number of 8-bit elements in a vector
+
+ /// <summary>
+ /// uint64_t svcntb_pat(enum svpattern pattern)
+ /// CNTB Xresult, pattern
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BL_1A CNTB <Xd>{, <pattern>{, MUL #<imm>}}
+ /// theEmitter->emitIns_R_PATTERN_I(INS_sve_cntb, EA_8BYTE, REG_R0, SVE_PATTERN_POW2, 1);
+ /// theEmitter->emitIns_R_PATTERN_I(INS_sve_cntb, EA_8BYTE, REG_R21, SVE_PATTERN_MUL4, 8);
+ /// </summary>
+ public static unsafe ulong Count8BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => Count8BitElements(pattern);
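+
+ // Usage sketch (editorial addition; assumes Sve.IsSupported): CountXBitElements reports how
+ // many elements the pattern selects at the running vector length, e.g. the natural step for
+ // a 32-bit element loop.
+ // ulong step = Sve.Count32BitElements();   // SveMaskPattern.All: vector length / 32 bits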
+
+
+ /// CreateBreakAfterMask : Break after first true condition
+
+ /// <summary>
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKA Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKA Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKA <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<sbyte> CreateBreakAfterMask(Vector<sbyte> totalMask, Vector<sbyte> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKA Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKA Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKA <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<short> CreateBreakAfterMask(Vector<short> totalMask, Vector<short> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKA Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKA Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKA <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<int> CreateBreakAfterMask(Vector<int> totalMask, Vector<int> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKA Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKA Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKA <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<long> CreateBreakAfterMask(Vector<long> totalMask, Vector<long> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKA Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKA Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKA <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<byte> CreateBreakAfterMask(Vector<byte> totalMask, Vector<byte> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKA Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKA Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKA <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ushort> CreateBreakAfterMask(Vector<ushort> totalMask, Vector<ushort> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKA Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKA Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKA <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<uint> CreateBreakAfterMask(Vector<uint> totalMask, Vector<uint> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrka[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKA Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrka[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKA Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKA <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brka, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ulong> CreateBreakAfterMask(Vector<ulong> totalMask, Vector<ulong> fromMask) => CreateBreakAfterMask(totalMask, fromMask);
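+
+ // Usage sketch (editorial addition; assumes Sve.IsSupported): BRKA keeps lanes of 'fromMask'
+ // up to and including its first active lane (within 'totalMask') and clears everything after,
+ // the usual building block for "stop at the first match" loops.
+ // Vector<byte> upToFirstMatch = Sve.CreateBreakAfterMask(totalMask, fromMask);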
+
+
+ /// CreateBreakAfterPropagateMask : Break after first true condition, propagating from previous partition
+
+ /// <summary>
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPA <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpa, EA_SCALABLE, REG_P0, REG_P1, REG_P10, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<sbyte> CreateBreakAfterPropagateMask(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPA <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpa, EA_SCALABLE, REG_P0, REG_P1, REG_P10, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<short> CreateBreakAfterPropagateMask(Vector<short> mask, Vector<short> left, Vector<short> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPA <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpa, EA_SCALABLE, REG_P0, REG_P1, REG_P10, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<int> CreateBreakAfterPropagateMask(Vector<int> mask, Vector<int> left, Vector<int> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPA <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpa, EA_SCALABLE, REG_P0, REG_P1, REG_P10, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<long> CreateBreakAfterPropagateMask(Vector<long> mask, Vector<long> left, Vector<long> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPA <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpa, EA_SCALABLE, REG_P0, REG_P1, REG_P10, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<byte> CreateBreakAfterPropagateMask(Vector<byte> mask, Vector<byte> left, Vector<byte> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPA <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpa, EA_SCALABLE, REG_P0, REG_P1, REG_P10, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<ushort> CreateBreakAfterPropagateMask(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPA <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpa, EA_SCALABLE, REG_P0, REG_P1, REG_P10, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<uint> CreateBreakAfterPropagateMask(Vector<uint> mask, Vector<uint> left, Vector<uint> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svbrkpa[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPA Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPA <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpa, EA_SCALABLE, REG_P0, REG_P1, REG_P10, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<ulong> CreateBreakAfterPropagateMask(Vector<ulong> mask, Vector<ulong> left, Vector<ulong> right) => CreateBreakAfterPropagateMask(mask, left, right);
+
+
+ /// CreateBreakBeforeMask : Break before first true condition
+
+ /// <summary>
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKB Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKB Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKB <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<sbyte> CreateBreakBeforeMask(Vector<sbyte> totalMask, Vector<sbyte> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKB Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKB Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKB <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<short> CreateBreakBeforeMask(Vector<short> totalMask, Vector<short> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKB Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKB Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKB <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<int> CreateBreakBeforeMask(Vector<int> totalMask, Vector<int> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKB Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKB Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKB <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<long> CreateBreakBeforeMask(Vector<long> totalMask, Vector<long> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKB Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKB Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKB <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<byte> CreateBreakBeforeMask(Vector<byte> totalMask, Vector<byte> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKB Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKB Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKB <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ushort> CreateBreakBeforeMask(Vector<ushort> totalMask, Vector<ushort> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKB Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKB Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKB <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<uint> CreateBreakBeforeMask(Vector<uint> totalMask, Vector<uint> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrkb[_b]_m(svbool_t inactive, svbool_t pg, svbool_t op)
+ /// BRKB Ptied.B, Pg/M, Pop.B
+ /// svbool_t svbrkb[_b]_z(svbool_t pg, svbool_t op)
+ /// BRKB Presult.B, Pg/Z, Pop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DB_3A BRKB <Pd>.B, <Pg>/<ZM>, <Pn>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkb, EA_SCALABLE, REG_P2, REG_P9, REG_P14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_PREDICATE_MERGE);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ulong> CreateBreakBeforeMask(Vector<ulong> totalMask, Vector<ulong> fromMask) => CreateBreakBeforeMask(totalMask, fromMask);
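+
+ // Usage sketch (editorial addition; assumes Sve.IsSupported): BRKB is the exclusive variant
+ // of BRKA; the first active lane itself is cleared, so only lanes strictly before it stay set.
+ // Vector<byte> beforeFirstMatch = Sve.CreateBreakBeforeMask(totalMask, fromMask);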
+
+
+ /// CreateBreakBeforePropagateMask : Break before first true condition, propagating from previous partition
+
+ /// <summary>
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPB <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpb, EA_SCALABLE, REG_P7, REG_P8, REG_P11, REG_P13, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<sbyte> CreateBreakBeforePropagateMask(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPB <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpb, EA_SCALABLE, REG_P7, REG_P8, REG_P11, REG_P13, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<short> CreateBreakBeforePropagateMask(Vector<short> mask, Vector<short> left, Vector<short> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPB <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpb, EA_SCALABLE, REG_P7, REG_P8, REG_P11, REG_P13, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<int> CreateBreakBeforePropagateMask(Vector<int> mask, Vector<int> left, Vector<int> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPB <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpb, EA_SCALABLE, REG_P7, REG_P8, REG_P11, REG_P13, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<long> CreateBreakBeforePropagateMask(Vector<long> mask, Vector<long> left, Vector<long> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPB <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpb, EA_SCALABLE, REG_P7, REG_P8, REG_P11, REG_P13, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<byte> CreateBreakBeforePropagateMask(Vector<byte> mask, Vector<byte> left, Vector<byte> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPB <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpb, EA_SCALABLE, REG_P7, REG_P8, REG_P11, REG_P13, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<ushort> CreateBreakBeforePropagateMask(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPB <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpb, EA_SCALABLE, REG_P7, REG_P8, REG_P11, REG_P13, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<uint> CreateBreakBeforePropagateMask(Vector<uint> mask, Vector<uint> left, Vector<uint> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+ /// <summary>
+ /// svbool_t svbrkpb[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKPB Presult.B, Pg/Z, Pop1.B, Pop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DA_4A BRKPB <Pd>.B, <Pg>/Z, <Pn>.B, <Pm>.B
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_brkpb, EA_SCALABLE, REG_P7, REG_P8, REG_P11, REG_P13, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<ulong> CreateBreakBeforePropagateMask(Vector<ulong> mask, Vector<ulong> left, Vector<ulong> right) => CreateBreakBeforePropagateMask(mask, left, right);
+
+
+ /// CreateBreakPropagateMask : Propagate break to next partition
+
+ /// <summary>
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DC_3A BRKN <Pdm>.B, <Pg>/Z, <Pn>.B, <Pdm>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkn, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<sbyte> CreateBreakPropagateMask(Vector<sbyte> totalMask, Vector<sbyte> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DC_3A BRKN <Pdm>.B, <Pg>/Z, <Pn>.B, <Pdm>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkn, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<short> CreateBreakPropagateMask(Vector<short> totalMask, Vector<short> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DC_3A BRKN <Pdm>.B, <Pg>/Z, <Pn>.B, <Pdm>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkn, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<int> CreateBreakPropagateMask(Vector<int> totalMask, Vector<int> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DC_3A BRKN <Pdm>.B, <Pg>/Z, <Pn>.B, <Pdm>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkn, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<long> CreateBreakPropagateMask(Vector<long> totalMask, Vector<long> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DC_3A BRKN <Pdm>.B, <Pg>/Z, <Pn>.B, <Pdm>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkn, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<byte> CreateBreakPropagateMask(Vector<byte> totalMask, Vector<byte> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DC_3A BRKN <Pdm>.B, <Pg>/Z, <Pn>.B, <Pdm>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkn, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ushort> CreateBreakPropagateMask(Vector<ushort> totalMask, Vector<ushort> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DC_3A BRKN <Pdm>.B, <Pg>/Z, <Pn>.B, <Pdm>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkn, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<uint> CreateBreakPropagateMask(Vector<uint> totalMask, Vector<uint> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svbrkn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2)
+ /// BRKN Ptied2.B, Pg/Z, Pop1.B, Ptied2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DC_3A BRKN <Pdm>.B, <Pg>/Z, <Pn>.B, <Pdm>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_brkn, EA_SCALABLE, REG_P0, REG_P8, REG_P15, INS_OPTS_SCALABLE_B);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ulong> CreateBreakPropagateMask(Vector<ulong> totalMask, Vector<ulong> fromMask) => CreateBreakPropagateMask(totalMask, fromMask);
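+
+ // Usage sketch (editorial addition; assumes Sve.IsSupported): BRKN carries a break condition
+ // from one predicate partition into the next, e.g. when a "stop at first match" search spans
+ // several per-iteration masks.
+ // Vector<byte> carriedBreak = Sve.CreateBreakPropagateMask(totalMask, fromMask);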
+
+
+ /// CreateFalseMaskByte : Set all predicate elements to false
+
+ /// <summary>
+ /// svbool_t svpfalse[_b]()
+ /// PFALSE Presult.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DJ_1A PFALSE <Pd>.B
+ /// theEmitter->emitIns_R(INS_sve_pfalse, EA_SCALABLE, REG_P13);
+ /// </summary>
+ public static unsafe Vector<byte> CreateFalseMaskByte() => CreateFalseMaskByte();
+
+
+ /// CreateFalseMaskDouble : Set all predicate elements to false
+
+ /// <summary>
+ /// svbool_t svpfalse[_b]()
+ /// PFALSE Presult.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DJ_1A PFALSE <Pd>.B
+ /// theEmitter->emitIns_R(INS_sve_pfalse, EA_SCALABLE, REG_P13);
+ /// </summary>
+ public static unsafe Vector<double> CreateFalseMaskDouble() => CreateFalseMaskDouble();
+
+
+ /// CreateFalseMaskInt16 : Set all predicate elements to false
+
+ /// <summary>
+ /// svbool_t svpfalse[_b]()
+ /// PFALSE Presult.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DJ_1A PFALSE <Pd>.B
+ /// theEmitter->emitIns_R(INS_sve_pfalse, EA_SCALABLE, REG_P13);
+ /// </summary>
+ public static unsafe Vector<short> CreateFalseMaskInt16() => CreateFalseMaskInt16();
+
+
+ /// CreateFalseMaskInt32 : Set all predicate elements to false
+
+ /// <summary>
+ /// svbool_t svpfalse[_b]()
+ /// PFALSE Presult.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DJ_1A PFALSE <Pd>.B
+ /// theEmitter->emitIns_R(INS_sve_pfalse, EA_SCALABLE, REG_P13);
+ /// </summary>
+ public static unsafe Vector<int> CreateFalseMaskInt32() => CreateFalseMaskInt32();
+
+
+ /// CreateFalseMaskInt64 : Set all predicate elements to false
+
+ /// <summary>
+ /// svbool_t svpfalse[_b]()
+ /// PFALSE Presult.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DJ_1A PFALSE <Pd>.B
+ /// theEmitter->emitIns_R(INS_sve_pfalse, EA_SCALABLE, REG_P13);
+ /// </summary>
+ public static unsafe Vector<long> CreateFalseMaskInt64() => CreateFalseMaskInt64();
+
+
+ /// CreateFalseMaskSByte : Set all predicate elements to false
+
+ /// <summary>
+ /// svbool_t svpfalse[_b]()
+ /// PFALSE Presult.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DJ_1A PFALSE <Pd>.B
+ /// theEmitter->emitIns_R(INS_sve_pfalse, EA_SCALABLE, REG_P13);
+ /// </summary>
+ public static unsafe Vector<sbyte> CreateFalseMaskSByte() => CreateFalseMaskSByte();
+
+
+ /// CreateFalseMaskSingle : Set all predicate elements to false
+
+ /// <summary>
+ /// svbool_t svpfalse[_b]()
+ /// PFALSE Presult.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DJ_1A PFALSE <Pd>.B
+ /// theEmitter->emitIns_R(INS_sve_pfalse, EA_SCALABLE, REG_P13);
+ /// </summary>
+ public static unsafe Vector<float> CreateFalseMaskSingle() => CreateFalseMaskSingle();
+
+
+ /// CreateFalseMaskUInt16 : Set all predicate elements to false
+
+ /// <summary>
+ /// svbool_t svpfalse[_b]()
+ /// PFALSE Presult.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DJ_1A PFALSE <Pd>.B
+ /// theEmitter->emitIns_R(INS_sve_pfalse, EA_SCALABLE, REG_P13);
+ /// </summary>
+ public static unsafe Vector<ushort> CreateFalseMaskUInt16() => CreateFalseMaskUInt16();
+
+
+ /// CreateFalseMaskUInt32 : Set all predicate elements to false
+
+ /// <summary>
+ /// svbool_t svpfalse[_b]()
+ /// PFALSE Presult.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DJ_1A PFALSE <Pd>.B
+ /// theEmitter->emitIns_R(INS_sve_pfalse, EA_SCALABLE, REG_P13);
+ /// </summary>
+ public static unsafe Vector<uint> CreateFalseMaskUInt32() => CreateFalseMaskUInt32();
+
+
+ /// CreateFalseMaskUInt64 : Set all predicate elements to false
+
+ /// <summary>
+ /// svbool_t svpfalse[_b]()
+ /// PFALSE Presult.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DJ_1A PFALSE <Pd>.B
+ /// theEmitter->emitIns_R(INS_sve_pfalse, EA_SCALABLE, REG_P13);
+ /// </summary>
+ public static unsafe Vector<ulong> CreateFalseMaskUInt64() => CreateFalseMaskUInt64();
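+
+ // Usage sketch (editorial addition; assumes Sve.IsSupported): an all-false mask is a natural
+ // seed for mask-accumulation loops, since OR-ing results into it starts from "no lanes active".
+ // Vector<int> none = Sve.CreateFalseMaskInt32();   // every lane inactive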
+
+
+ /// CreateMaskForFirstActiveElement : Set the first active predicate element to true
+
+ /// <summary>
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ /// PFIRST Ptied.B, Pg, Ptied.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DD_2A PFIRST <Pdn>.B, <Pg>, <Pdn>.B
+ /// theEmitter->emitIns_R_R(INS_sve_pfirst, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<sbyte> CreateMaskForFirstActiveElement(Vector<sbyte> totalMask, Vector<sbyte> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ /// PFIRST Ptied.B, Pg, Ptied.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DD_2A PFIRST <Pdn>.B, <Pg>, <Pdn>.B
+ /// theEmitter->emitIns_R_R(INS_sve_pfirst, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<short> CreateMaskForFirstActiveElement(Vector<short> totalMask, Vector<short> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ /// PFIRST Ptied.B, Pg, Ptied.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DD_2A PFIRST <Pdn>.B, <Pg>, <Pdn>.B
+ /// theEmitter->emitIns_R_R(INS_sve_pfirst, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<int> CreateMaskForFirstActiveElement(Vector<int> totalMask, Vector<int> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ /// PFIRST Ptied.B, Pg, Ptied.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DD_2A PFIRST <Pdn>.B, <Pg>, <Pdn>.B
+ /// theEmitter->emitIns_R_R(INS_sve_pfirst, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<long> CreateMaskForFirstActiveElement(Vector<long> totalMask, Vector<long> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ /// PFIRST Ptied.B, Pg, Ptied.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DD_2A PFIRST <Pdn>.B, <Pg>, <Pdn>.B
+ /// theEmitter->emitIns_R_R(INS_sve_pfirst, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<byte> CreateMaskForFirstActiveElement(Vector<byte> totalMask, Vector<byte> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ /// PFIRST Ptied.B, Pg, Ptied.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DD_2A PFIRST <Pdn>.B, <Pg>, <Pdn>.B
+ /// theEmitter->emitIns_R_R(INS_sve_pfirst, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<ushort> CreateMaskForFirstActiveElement(Vector<ushort> totalMask, Vector<ushort> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ /// PFIRST Ptied.B, Pg, Ptied.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DD_2A PFIRST <Pdn>.B, <Pg>, <Pdn>.B
+ /// theEmitter->emitIns_R_R(INS_sve_pfirst, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<uint> CreateMaskForFirstActiveElement(Vector<uint> totalMask, Vector<uint> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svpfirst[_b](svbool_t pg, svbool_t op)
+ /// PFIRST Ptied.B, Pg, Ptied.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DD_2A PFIRST <Pdn>.B, <Pg>, <Pdn>.B
+ /// theEmitter->emitIns_R_R(INS_sve_pfirst, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<ulong> CreateMaskForFirstActiveElement(Vector<ulong> totalMask, Vector<ulong> fromMask) => CreateMaskForFirstActiveElement(totalMask, fromMask);
+
+
+ /// CreateMaskForNextActiveElement : Find next active predicate
+
+ /// <summary>
+ /// svbool_t svpnext_b8(svbool_t pg, svbool_t op)
+ /// PNEXT Ptied.B, Pg, Ptied.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DF_2A PNEXT <Pdn>.<T>, <Pv>, <Pdn>.<T>
+ /// theEmitter->emitIns_R_R(INS_sve_pnext, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<byte> CreateMaskForNextActiveElement(Vector<byte> totalMask, Vector<byte> fromMask) => CreateMaskForNextActiveElement(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svpnext_b16(svbool_t pg, svbool_t op)
+ /// PNEXT Ptied.H, Pg, Ptied.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DF_2A PNEXT <Pdn>.<T>, <Pv>, <Pdn>.<T>
+ /// theEmitter->emitIns_R_R(INS_sve_pnext, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<ushort> CreateMaskForNextActiveElement(Vector<ushort> totalMask, Vector<ushort> fromMask) => CreateMaskForNextActiveElement(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svpnext_b32(svbool_t pg, svbool_t op)
+ /// PNEXT Ptied.S, Pg, Ptied.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DF_2A PNEXT <Pdn>.<T>, <Pv>, <Pdn>.<T>
+ /// theEmitter->emitIns_R_R(INS_sve_pnext, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<uint> CreateMaskForNextActiveElement(Vector<uint> totalMask, Vector<uint> fromMask) => CreateMaskForNextActiveElement(totalMask, fromMask);
+
+ /// <summary>
+ /// svbool_t svpnext_b64(svbool_t pg, svbool_t op)
+ /// PNEXT Ptied.D, Pg, Ptied.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DF_2A PNEXT <Pdn>.<T>, <Pv>, <Pdn>.<T>
+ /// theEmitter->emitIns_R_R(INS_sve_pnext, EA_SCALABLE, REG_P0, REG_P15, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<ulong> CreateMaskForNextActiveElement(Vector<ulong> totalMask, Vector<ulong> fromMask) => CreateMaskForNextActiveElement(totalMask, fromMask);
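+
+ // Usage sketch (editorial addition, hypothetical loop; assumes Sve.IsSupported, and that a
+ // TestAnyTrue-style query from the same API family is available): PFIRST/PNEXT walk the
+ // active lanes of 'totalMask' one element at a time.
+ // var elem = Sve.CreateMaskForFirstActiveElement(totalMask, Sve.CreateFalseMaskUInt32());
+ // while (Sve.TestAnyTrue(totalMask, elem))
+ // {
+ //     ... // process the single active lane selected by 'elem'
+ //     elem = Sve.CreateMaskForNextActiveElement(totalMask, elem);
+ // }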
+
+
+ /// CreateTrueMaskByte : Set predicate elements to true
+
+ /// <summary>
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+ /// PTRUE Presult.B, pattern
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DE_1A PTRUE <Pd>.<T>{, <pattern>}
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P0, INS_OPTS_SCALABLE_B, SVE_PATTERN_POW2);
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P7, INS_OPTS_SCALABLE_H, SVE_PATTERN_MUL3);
+ /// IF_SVE_DZ_1A PTRUE <PNd>.<T>
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P9, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P10, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P11, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<byte> CreateTrueMaskByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskByte(pattern);
+
+
+ /// CreateTrueMaskDouble : Set predicate elements to true
+
+ /// <summary>
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+ /// PTRUE Presult.B, pattern
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DE_1A PTRUE <Pd>.<T>{, <pattern>}
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P0, INS_OPTS_SCALABLE_B, SVE_PATTERN_POW2);
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P7, INS_OPTS_SCALABLE_H, SVE_PATTERN_MUL3);
+ /// IF_SVE_DZ_1A PTRUE <PNd>.<T>
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P9, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P10, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P11, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<double> CreateTrueMaskDouble([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskDouble(pattern);
+
+
+ /// CreateTrueMaskInt16 : Set predicate elements to true
+
+ /// <summary>
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+ /// PTRUE Presult.B, pattern
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DE_1A PTRUE <Pd>.<T>{, <pattern>}
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P0, INS_OPTS_SCALABLE_B, SVE_PATTERN_POW2);
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P7, INS_OPTS_SCALABLE_H, SVE_PATTERN_MUL3);
+ /// IF_SVE_DZ_1A PTRUE <PNd>.<T>
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P9, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P10, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P11, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<short> CreateTrueMaskInt16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskInt16(pattern);
+
+
+ /// CreateTrueMaskInt32 : Set predicate elements to true
+
+ /// <summary>
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+ /// PTRUE Presult.B, pattern
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DE_1A PTRUE <Pd>.<T>{, <pattern>}
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P0, INS_OPTS_SCALABLE_B, SVE_PATTERN_POW2);
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P7, INS_OPTS_SCALABLE_H, SVE_PATTERN_MUL3);
+ /// IF_SVE_DZ_1A PTRUE <PNd>.<T>
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P9, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P10, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P11, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> CreateTrueMaskInt32([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskInt32(pattern);
+
+
+ /// CreateTrueMaskInt64 : Set predicate elements to true
+
+ /// <summary>
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+ /// PTRUE Presult.B, pattern
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DE_1A PTRUE <Pd>.<T>{, <pattern>}
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P0, INS_OPTS_SCALABLE_B, SVE_PATTERN_POW2);
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P7, INS_OPTS_SCALABLE_H, SVE_PATTERN_MUL3);
+ /// IF_SVE_DZ_1A PTRUE <PNd>.<T>
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P9, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P10, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P11, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> CreateTrueMaskInt64([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskInt64(pattern);
+
+
+ /// CreateTrueMaskSByte : Set predicate elements to true
+
+ /// <summary>
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+ /// PTRUE Presult.B, pattern
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DE_1A PTRUE <Pd>.<T>{, <pattern>}
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P0, INS_OPTS_SCALABLE_B, SVE_PATTERN_POW2);
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P7, INS_OPTS_SCALABLE_H, SVE_PATTERN_MUL3);
+ /// IF_SVE_DZ_1A PTRUE <PNd>.<T>
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P9, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P10, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P11, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<sbyte> CreateTrueMaskSByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskSByte(pattern);
+
+
+ /// CreateTrueMaskSingle : Set predicate elements to true
+
+ /// <summary>
+ /// svbool_t svptrue_pat_b8(enum svpattern pattern)
+ /// PTRUE Presult.B, pattern
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DE_1A PTRUE <Pd>.<T>{, <pattern>}
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P0, INS_OPTS_SCALABLE_B, SVE_PATTERN_POW2);
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P7, INS_OPTS_SCALABLE_H, SVE_PATTERN_MUL3);
+ /// IF_SVE_DZ_1A PTRUE <PNd>.<T>
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P9, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P10, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P11, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<float> CreateTrueMaskSingle([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskSingle(pattern);
+
+
+ /// CreateTrueMaskUInt16 : Set predicate elements to true
+
+ /// <summary>
+ /// svbool_t svptrue_pat_b16(enum svpattern pattern)
+ /// PTRUE Presult.H, pattern
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DE_1A PTRUE <Pd>.<T>{, <pattern>}
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P0, INS_OPTS_SCALABLE_B, SVE_PATTERN_POW2);
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P7, INS_OPTS_SCALABLE_H, SVE_PATTERN_MUL3);
+ /// IF_SVE_DZ_1A PTRUE <PNd>.<T>
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P9, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P10, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P11, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ushort> CreateTrueMaskUInt16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskUInt16(pattern);
+
+
+ /// CreateTrueMaskUInt32 : Set predicate elements to true
+
+ /// <summary>
+ /// svbool_t svptrue_pat_b32(enum svpattern pattern)
+ /// PTRUE Presult.S, pattern
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DE_1A PTRUE <Pd>.<T>{, <pattern>}
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P0, INS_OPTS_SCALABLE_B, SVE_PATTERN_POW2);
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P7, INS_OPTS_SCALABLE_H, SVE_PATTERN_MUL3);
+ /// IF_SVE_DZ_1A PTRUE <PNd>.<T>
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P9, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P10, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P11, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<uint> CreateTrueMaskUInt32([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskUInt32(pattern);
+
+
+ /// CreateTrueMaskUInt64 : Set predicate elements to true
+
+ /// <summary>
+ /// svbool_t svptrue_pat_b64(enum svpattern pattern)
+ /// PTRUE Presult.D, pattern
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DE_1A PTRUE <Pd>.<T>{, <pattern>}
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P0, INS_OPTS_SCALABLE_B, SVE_PATTERN_POW2);
+ /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P7, INS_OPTS_SCALABLE_H, SVE_PATTERN_MUL3);
+ /// IF_SVE_DZ_1A PTRUE <PNd>.<T>
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P9, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P10, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P11, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> CreateTrueMaskUInt64([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskUInt64(pattern);
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P10, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateTrueMaskUInt32([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskUInt32(pattern); + + + /// CreateTrueMaskUInt64 : Set predicate elements to true + + /// + /// svbool_t svptrue_pat_b64(enum svpattern pattern) + /// PTRUE Presult.D, pattern + /// + /// codegenarm64test: + /// IF_SVE_DE_1A PTRUE .{, } + /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P0, INS_OPTS_SCALABLE_B, SVE_PATTERN_POW2); + /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P7, INS_OPTS_SCALABLE_H, SVE_PATTERN_MUL3); + /// IF_SVE_DZ_1A PTRUE . + /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P10, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateTrueMaskUInt64([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskUInt64(pattern); + + + /// CreateWhileLessThanMask16Bit : While incrementing scalar is less than + + /// + /// svbool_t svwhilelt_b16[_s32](int32_t op1, int32_t op2) + /// WHILELT Presult.H, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELT ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_4BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELT {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELT ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask16Bit(int left, int right) => CreateWhileLessThanMask16Bit(left, right); + + /// + /// svbool_t svwhilelt_b16[_s64](int64_t op1, int64_t op2) + /// WHILELT Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELT ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_4BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELT {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELT ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask16Bit(long left, long right) => CreateWhileLessThanMask16Bit(left, right); + + 
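+        // Illustrative usage sketch (not part of the generated surface; the
+        // loop shape and variable names are hypothetical): a while-mask lets a
+        // predicated loop cover the tail without a scalar remainder loop,
+        // assuming Sve.IsSupported:
+        //
+        //   for (int i = 0; i < length; i += Vector<ushort>.Count)
+        //   {
+        //       Vector<ushort> mask = Sve.CreateWhileLessThanMask16Bit(i, length);
+        //       // ... loads/stores predicated on 'mask' ...
+        //   }
+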
/// + /// svbool_t svwhilelt_b16[_u32](uint32_t op1, uint32_t op2) + /// WHILELO Presult.H, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELO ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_4BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELO {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELO ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask16Bit(uint left, uint right) => CreateWhileLessThanMask16Bit(left, right); + + /// + /// svbool_t svwhilelt_b16[_u64](uint64_t op1, uint64_t op2) + /// WHILELO Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELO ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_4BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELO {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELO ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask16Bit(ulong left, ulong right) => CreateWhileLessThanMask16Bit(left, right); + + + /// CreateWhileLessThanMask32Bit : While incrementing scalar is less than + + /// + /// svbool_t svwhilelt_b32[_s32](int32_t op1, int32_t op2) + /// WHILELT Presult.S, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELT ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_4BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELT {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELT ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask32Bit(int left, int right) => CreateWhileLessThanMask32Bit(left, right); + + /// + /// svbool_t svwhilelt_b32[_s64](int64_t op1, int64_t op2) + /// WHILELT Presult.S, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELT ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_4BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELT {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P7, 
REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELT ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask32Bit(long left, long right) => CreateWhileLessThanMask32Bit(left, right); + + /// + /// svbool_t svwhilelt_b32[_u32](uint32_t op1, uint32_t op2) + /// WHILELO Presult.S, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELO ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_4BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELO {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELO ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask32Bit(uint left, uint right) => CreateWhileLessThanMask32Bit(left, right); + + /// + /// svbool_t svwhilelt_b32[_u64](uint64_t op1, uint64_t op2) + /// WHILELO Presult.S, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELO ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_4BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELO {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELO ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask32Bit(ulong left, ulong right) => CreateWhileLessThanMask32Bit(left, right); + + + /// CreateWhileLessThanMask64Bit : While incrementing scalar is less than + + /// + /// svbool_t svwhilelt_b64[_s32](int32_t op1, int32_t op2) + /// WHILELT Presult.D, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELT ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_4BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELT {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELT ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask64Bit(int left, int right) => 
CreateWhileLessThanMask64Bit(left, right); + + /// + /// svbool_t svwhilelt_b64[_s64](int64_t op1, int64_t op2) + /// WHILELT Presult.D, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELT ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_4BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELT {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELT ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask64Bit(long left, long right) => CreateWhileLessThanMask64Bit(left, right); + + /// + /// svbool_t svwhilelt_b64[_u32](uint32_t op1, uint32_t op2) + /// WHILELO Presult.D, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELO ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_4BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELO {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELO ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask64Bit(uint left, uint right) => CreateWhileLessThanMask64Bit(left, right); + + /// + /// svbool_t svwhilelt_b64[_u64](uint64_t op1, uint64_t op2) + /// WHILELO Presult.D, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELO ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_4BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELO {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELO ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask64Bit(ulong left, ulong right) => CreateWhileLessThanMask64Bit(left, right); + + + /// CreateWhileLessThanMask8Bit : While incrementing scalar is less than + + /// + /// svbool_t svwhilelt_b8[_s32](int32_t op1, int32_t op2) + /// WHILELT Presult.B, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELT ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_4BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELT {., .}, , + /// 
theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELT ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask8Bit(int left, int right) => CreateWhileLessThanMask8Bit(left, right); + + /// + /// svbool_t svwhilelt_b8[_s64](int64_t op1, int64_t op2) + /// WHILELT Presult.B, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELT ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_4BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELT {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELT ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelt, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask8Bit(long left, long right) => CreateWhileLessThanMask8Bit(left, right); + + /// + /// svbool_t svwhilelt_b8[_u32](uint32_t op1, uint32_t op2) + /// WHILELO Presult.B, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELO ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_4BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELO {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELO ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask8Bit(uint left, uint right) => CreateWhileLessThanMask8Bit(left, right); + + /// + /// svbool_t svwhilelt_b8[_u64](uint64_t op1, uint64_t op2) + /// WHILELO Presult.B, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELO ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_4BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELO {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELO ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilelo, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanMask8Bit(ulong left, ulong right) => CreateWhileLessThanMask8Bit(left, right); + + 
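+        // Illustrative note (sketch only, hypothetical caller): per the
+        // signatures above, the signed overloads lower to WHILELT and the
+        // unsigned ones to WHILELO, so an unsigned counter can drive the
+        // predicate directly, assuming Sve.IsSupported:
+        //
+        //   Vector<byte> mask = Sve.CreateWhileLessThanMask8Bit((uint)i, (uint)length);
+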
+ /// CreateWhileLessThanOrEqualMask16Bit : While incrementing scalar is less than or equal to + + /// + /// svbool_t svwhilele_b16[_s32](int32_t op1, int32_t op2) + /// WHILELE Presult.H, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_4BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask16Bit(int left, int right) => CreateWhileLessThanOrEqualMask16Bit(left, right); + + /// + /// svbool_t svwhilele_b16[_s64](int64_t op1, int64_t op2) + /// WHILELE Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_4BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask16Bit(long left, long right) => CreateWhileLessThanOrEqualMask16Bit(left, right); + + /// + /// svbool_t svwhilele_b16[_u32](uint32_t op1, uint32_t op2) + /// WHILELS Presult.H, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_4BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask16Bit(uint left, uint right) => CreateWhileLessThanOrEqualMask16Bit(left, right); + + /// + /// svbool_t svwhilele_b16[_u64](uint64_t op1, uint64_t op2) + /// WHILELS Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_4BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELS {., .}, , + /// 
theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask16Bit(ulong left, ulong right) => CreateWhileLessThanOrEqualMask16Bit(left, right); + + + /// CreateWhileLessThanOrEqualMask32Bit : While incrementing scalar is less than or equal to + + /// + /// svbool_t svwhilele_b32[_s32](int32_t op1, int32_t op2) + /// WHILELE Presult.S, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_4BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask32Bit(int left, int right) => CreateWhileLessThanOrEqualMask32Bit(left, right); + + /// + /// svbool_t svwhilele_b32[_s64](int64_t op1, int64_t op2) + /// WHILELE Presult.S, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_4BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask32Bit(long left, long right) => CreateWhileLessThanOrEqualMask32Bit(left, right); + + /// + /// svbool_t svwhilele_b32[_u32](uint32_t op1, uint32_t op2) + /// WHILELS Presult.S, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_4BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// 
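+        /// Example (illustrative only, hypothetical values): unlike the
+        /// exclusive CreateWhileLessThanMask32Bit, the element at 'right' is
+        /// itself still active, so this call enables the first four lanes
+        /// (vector length permitting):
+        ///   Vector<uint> mask = Sve.CreateWhileLessThanOrEqualMask32Bit(0, 3);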
+ public static unsafe Vector CreateWhileLessThanOrEqualMask32Bit(uint left, uint right) => CreateWhileLessThanOrEqualMask32Bit(left, right); + + /// + /// svbool_t svwhilele_b32[_u64](uint64_t op1, uint64_t op2) + /// WHILELS Presult.S, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_4BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask32Bit(ulong left, ulong right) => CreateWhileLessThanOrEqualMask32Bit(left, right); + + + /// CreateWhileLessThanOrEqualMask64Bit : While incrementing scalar is less than or equal to + + /// + /// svbool_t svwhilele_b64[_s32](int32_t op1, int32_t op2) + /// WHILELE Presult.D, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_4BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask64Bit(int left, int right) => CreateWhileLessThanOrEqualMask64Bit(left, right); + + /// + /// svbool_t svwhilele_b64[_s64](int64_t op1, int64_t op2) + /// WHILELE Presult.D, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_4BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask64Bit(long left, long right) => CreateWhileLessThanOrEqualMask64Bit(left, right); + + /// + /// svbool_t svwhilele_b64[_u32](uint32_t op1, uint32_t op2) + /// WHILELS Presult.D, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_4BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D); + /// 
theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask64Bit(uint left, uint right) => CreateWhileLessThanOrEqualMask64Bit(left, right); + + /// + /// svbool_t svwhilele_b64[_u64](uint64_t op1, uint64_t op2) + /// WHILELS Presult.D, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_4BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask64Bit(ulong left, ulong right) => CreateWhileLessThanOrEqualMask64Bit(left, right); + + + /// CreateWhileLessThanOrEqualMask8Bit : While incrementing scalar is less than or equal to + + /// + /// svbool_t svwhilele_b8[_s32](int32_t op1, int32_t op2) + /// WHILELE Presult.B, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_4BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask8Bit(int left, int right) => CreateWhileLessThanOrEqualMask8Bit(left, right); + + /// + /// svbool_t svwhilele_b8[_s64](int64_t op1, int64_t op2) + /// WHILELE Presult.B, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_4BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_S); + /// IF_SVE_DX_3A WHILELE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, 
INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilele, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask8Bit(long left, long right) => CreateWhileLessThanOrEqualMask8Bit(left, right); + + /// + /// svbool_t svwhilele_b8[_u32](uint32_t op1, uint32_t op2) + /// WHILELS Presult.B, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_4BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask8Bit(uint left, uint right) => CreateWhileLessThanOrEqualMask8Bit(left, right); + + /// + /// svbool_t svwhilele_b8[_u64](uint64_t op1, uint64_t op2) + /// WHILELS Presult.B, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILELS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_4BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D); + /// IF_SVE_DX_3A WHILELS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILELS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilels, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileLessThanOrEqualMask8Bit(ulong left, ulong right) => CreateWhileLessThanOrEqualMask8Bit(left, right); + + + /// Divide : Divide + + /// + /// svint32_t svdiv[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// SDIV Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SDIV Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svdiv[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// SDIV Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// SDIVR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; SDIV Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svdiv[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; SDIV Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; SDIVR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AC_3A SDIV ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sdiv, EA_SCALABLE, REG_V3, REG_P2, REG_V9, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Divide(Vector left, Vector right) => Divide(left, right); + + /// + /// svint64_t svdiv[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// SDIV Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SDIV Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svdiv[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// SDIV Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// SDIVR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; SDIV Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svdiv[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SDIV Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; SDIVR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AC_3A SDIV ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sdiv, EA_SCALABLE, REG_V3, REG_P2, REG_V9, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Divide(Vector left, Vector right) => Divide(left, right); + + /// + /// svuint32_t svdiv[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UDIV Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UDIV Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svdiv[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UDIV Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// UDIVR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; UDIV Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svdiv[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; UDIV Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; UDIVR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AC_3A UDIV ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_udiv, EA_SCALABLE, REG_V1, REG_P0, REG_V0, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Divide(Vector left, Vector right) => Divide(left, right); + + /// + /// svuint64_t svdiv[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UDIV Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UDIV Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svdiv[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UDIV Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// UDIVR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; UDIV Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svdiv[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; UDIV Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; UDIVR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AC_3A UDIV ., /M, ., . 
+        ///    theEmitter->emitIns_R_R_R(INS_sve_udiv, EA_SCALABLE, REG_V1, REG_P0, REG_V0, INS_OPTS_SCALABLE_S);
+        ///
+        /// Embedded arg1 mask predicate
+        /// </summary>
+        public static unsafe Vector<ulong> Divide(Vector<ulong> left, Vector<ulong> right) => Divide(left, right);
+
+        /// <summary>
+        /// svfloat32_t svdiv[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        ///   FDIV Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+        ///   MOVPRFX Zresult, Zop1; FDIV Zresult.S, Pg/M, Zresult.S, Zop2.S
+        /// svfloat32_t svdiv[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        ///   FDIV Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+        ///   FDIVR Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+        ///   MOVPRFX Zresult, Zop1; FDIV Zresult.S, Pg/M, Zresult.S, Zop2.S
+        /// svfloat32_t svdiv[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+        ///   MOVPRFX Zresult.S, Pg/Z, Zop1.S; FDIV Zresult.S, Pg/M, Zresult.S, Zop2.S
+        ///   MOVPRFX Zresult.S, Pg/Z, Zop2.S; FDIVR Zresult.S, Pg/M, Zresult.S, Zop1.S
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_HL_3A  FDIV <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+        ///    theEmitter->emitIns_R_R_R(INS_sve_fdiv, EA_SCALABLE, REG_V28, REG_P0, REG_V7, INS_OPTS_SCALABLE_S);
+        ///
+        /// Embedded arg1 mask predicate
+        /// </summary>
+        public static unsafe Vector<float> Divide(Vector<float> left, Vector<float> right) => Divide(left, right);
+
+        /// <summary>
+        /// svfloat64_t svdiv[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        ///   FDIV Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+        ///   MOVPRFX Zresult, Zop1; FDIV Zresult.D, Pg/M, Zresult.D, Zop2.D
+        /// svfloat64_t svdiv[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        ///   FDIV Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+        ///   FDIVR Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+        ///   MOVPRFX Zresult, Zop1; FDIV Zresult.D, Pg/M, Zresult.D, Zop2.D
+        /// svfloat64_t svdiv[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+        ///   MOVPRFX Zresult.D, Pg/Z, Zop1.D; FDIV Zresult.D, Pg/M, Zresult.D, Zop2.D
+        ///   MOVPRFX Zresult.D, Pg/Z, Zop2.D; FDIVR Zresult.D, Pg/M, Zresult.D, Zop1.D
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_HL_3A  FDIV <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+        ///    theEmitter->emitIns_R_R_R(INS_sve_fdiv, EA_SCALABLE, REG_V28, REG_P0, REG_V7, INS_OPTS_SCALABLE_S);
+        ///
+        /// Embedded arg1 mask predicate
+        /// </summary>
+        public static unsafe Vector<double> Divide(Vector<double> left, Vector<double> right) => Divide(left, right);
+
+
+
+        /// DotProduct : Dot product
+
+        /// <summary>
+        /// svint32_t svdot[_s32](svint32_t op1, svint8_t op2, svint8_t op3)
+        ///   SDOT Ztied1.S, Zop2.B, Zop3.B
+        ///   MOVPRFX Zresult, Zop1; SDOT Zresult.S, Zop2.B, Zop3.B
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_EH_3A  SDOT <Zda>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ /// theEmitter->emitIns_R_R_R(INS_sve_sdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sdot, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_EG_3A SDOT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V1, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V3, REG_V4, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_EY_3A SDOT .S, .B, .B[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V9, REG_V10, REG_V4, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V11, REG_V12, REG_V5, 1, INS_OPTS_SCALABLE_B); + /// IF_SVE_EY_3B SDOT .D, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1); + /// IF_SVE_EF_3A SDOT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_sdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right) => DotProduct(addend, left, right); + + /// + /// svint64_t svdot[_s64](svint64_t op1, svint16_t op2, svint16_t op3) + /// SDOT Ztied1.D, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; SDOT Zresult.D, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EH_3A SDOT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sdot, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_EG_3A SDOT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V1, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V3, REG_V4, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_EY_3A SDOT .S, .B, .B[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V9, REG_V10, REG_V4, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V11, REG_V12, REG_V5, 1, INS_OPTS_SCALABLE_B); + /// IF_SVE_EY_3B SDOT .D, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1); + /// IF_SVE_EF_3A SDOT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_sdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right) => DotProduct(addend, left, right); + + /// + /// svuint32_t svdot[_u32](svuint32_t op1, svuint8_t op2, svuint8_t op3) + /// UDOT Ztied1.S, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; UDOT Zresult.S, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EH_3A UDOT ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_udot, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_udot, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_EG_3A UDOT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V5, REG_V6, REG_V2, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V7, REG_V8, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_EY_3A UDOT .S, .B, .B[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V13, REG_V14, REG_V6, 2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V15, REG_V16, REG_V7, 3, INS_OPTS_SCALABLE_B); + /// IF_SVE_EY_3B UDOT .D, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0); + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1); + /// IF_SVE_EF_3A UDOT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_udot, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right) => DotProduct(addend, left, right); + + /// + /// svuint64_t svdot[_u64](svuint64_t op1, svuint16_t op2, svuint16_t op3) + /// UDOT Ztied1.D, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; UDOT Zresult.D, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EH_3A UDOT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_udot, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_udot, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_EG_3A UDOT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V5, REG_V6, REG_V2, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V7, REG_V8, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_EY_3A UDOT .S, .B, .B[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V13, REG_V14, REG_V6, 2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V15, REG_V16, REG_V7, 3, INS_OPTS_SCALABLE_B); + /// IF_SVE_EY_3B UDOT .D, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0); + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1); + /// IF_SVE_EF_3A UDOT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_udot, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right) => DotProduct(addend, left, right); + + + /// DotProductBySelectedScalar : Dot product + + /// + /// svint32_t svdot_lane[_s32](svint32_t op1, svint8_t op2, svint8_t op3, uint64_t imm_index) + /// SDOT Ztied1.S, Zop2.B, Zop3.B[imm_index] + /// MOVPRFX Zresult, Zop1; SDOT Zresult.S, Zop2.B, Zop3.B[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EH_3A SDOT ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sdot, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_EG_3A SDOT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V1, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V3, REG_V4, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_EY_3A SDOT .S, .B, .B[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V9, REG_V10, REG_V4, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V11, REG_V12, REG_V5, 1, INS_OPTS_SCALABLE_B); + /// IF_SVE_EY_3B SDOT .D, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1); + /// IF_SVE_EF_3A SDOT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_sdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => DotProductBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svint64_t svdot_lane[_s64](svint64_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// SDOT Ztied1.D, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; SDOT Zresult.D, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EH_3A SDOT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sdot, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_EG_3A SDOT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V1, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V3, REG_V4, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_EY_3A SDOT .S, .B, .B[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V9, REG_V10, REG_V4, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V11, REG_V12, REG_V5, 1, INS_OPTS_SCALABLE_B); + /// IF_SVE_EY_3B SDOT .D, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sdot, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1); + /// IF_SVE_EF_3A SDOT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_sdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => DotProductBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svuint32_t svdot_lane[_u32](svuint32_t op1, svuint8_t op2, svuint8_t op3, uint64_t imm_index) + /// UDOT Ztied1.S, Zop2.B, Zop3.B[imm_index] + /// MOVPRFX Zresult, Zop1; UDOT Zresult.S, Zop2.B, Zop3.B[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EH_3A UDOT ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_udot, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_udot, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_EG_3A UDOT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V5, REG_V6, REG_V2, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V7, REG_V8, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_EY_3A UDOT .S, .B, .B[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V13, REG_V14, REG_V6, 2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V15, REG_V16, REG_V7, 3, INS_OPTS_SCALABLE_B); + /// IF_SVE_EY_3B UDOT .D, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0); + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1); + /// IF_SVE_EF_3A UDOT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_udot, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => DotProductBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svuint64_t svdot_lane[_u64](svuint64_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// UDOT Ztied1.D, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; UDOT Zresult.D, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EH_3A UDOT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_udot, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_udot, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_EG_3A UDOT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V5, REG_V6, REG_V2, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V7, REG_V8, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_EY_3A UDOT .S, .B, .B[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V13, REG_V14, REG_V6, 2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V15, REG_V16, REG_V7, 3, INS_OPTS_SCALABLE_B); + /// IF_SVE_EY_3B UDOT .D, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0); + /// theEmitter->emitIns_R_R_R_I(INS_sve_udot, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1); + /// IF_SVE_EF_3A UDOT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_udot, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => DotProductBySelectedScalar(addend, left, right, rightIndex); + + + /// DuplicateSelectedScalarToVector : Broadcast a scalar value + + /// + /// svint8_t svdup_lane[_s8](svint8_t data, uint8_t index) + /// DUP Zresult.B, Zdata.B[index] + /// TBL Zresult.B, Zdata.B, Zindex.B + /// svint8_t svdupq_lane[_s8](svint8_t data, uint64_t index) + /// DUP Zresult.Q, Zdata.Q[index] + /// TBL Zresult.D, Zdata.D, Zindices_d.D + /// + /// codegenarm64test: + /// IF_SVE_EB_1A DUP ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, 
REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// IF_SVE_CB_2A DUP ., + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V1, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_8BYTE, REG_V4, REG_SP, INS_OPTS_SCALABLE_D); + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svint16_t svdup_lane[_s16](svint16_t data, uint16_t index) + /// DUP Zresult.H, Zdata.H[index] + /// TBL Zresult.H, Zdata.H, Zindex.H + /// svint16_t svdupq_lane[_s16](svint16_t data, uint64_t index) + /// DUP Zresult.Q, Zdata.Q[index] + /// TBL Zresult.D, Zdata.D, Zindices_d.D + /// + /// codegenarm64test: + /// IF_SVE_EB_1A DUP ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// IF_SVE_CB_2A DUP ., + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V1, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_8BYTE, REG_V4, REG_SP, INS_OPTS_SCALABLE_D); + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svint32_t svdup_lane[_s32](svint32_t data, uint32_t index) + /// DUP Zresult.S, Zdata.S[index] + /// TBL Zresult.S, Zdata.S, Zindex.S + /// svint32_t svdupq_lane[_s32](svint32_t data, uint64_t index) + /// DUP Zresult.Q, Zdata.Q[index] + /// TBL Zresult.D, Zdata.D, Zindices_d.D + /// + /// codegenarm64test: + /// IF_SVE_EB_1A DUP ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// IF_SVE_CB_2A DUP ., + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V1, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_8BYTE, REG_V4, REG_SP, INS_OPTS_SCALABLE_D); + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svint64_t svdup_lane[_s64](svint64_t data, uint64_t index) + /// DUP Zresult.D, Zdata.D[index] + /// TBL Zresult.D, Zdata.D, Zindex.D + /// svint64_t svdupq_lane[_s64](svint64_t data, uint64_t index) + /// DUP Zresult.Q, Zdata.Q[index] + /// TBL Zresult.D, Zdata.D, Zindices_d.D + /// + /// codegenarm64test: + /// IF_SVE_EB_1A DUP ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// IF_SVE_CB_2A DUP ., + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V1, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_8BYTE, REG_V4, REG_SP, INS_OPTS_SCALABLE_D); + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svuint8_t svdup_lane[_u8](svuint8_t data, uint8_t index) + /// DUP Zresult.B, Zdata.B[index] + /// TBL Zresult.B, Zdata.B, Zindex.B + /// svuint8_t svdupq_lane[_u8](svuint8_t data, uint64_t index) + /// DUP Zresult.Q, Zdata.Q[index] + /// TBL Zresult.D, Zdata.D, Zindices_d.D + /// + /// codegenarm64test: + /// IF_SVE_EB_1A DUP ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// IF_SVE_CB_2A DUP ., + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V1, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_8BYTE, REG_V4, REG_SP, INS_OPTS_SCALABLE_D); + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svuint16_t svdup_lane[_u16](svuint16_t data, uint16_t index) + /// DUP Zresult.H, Zdata.H[index] + /// TBL Zresult.H, Zdata.H, Zindex.H + /// svuint16_t svdupq_lane[_u16](svuint16_t data, uint64_t index) + /// DUP Zresult.Q, Zdata.Q[index] + /// TBL Zresult.D, Zdata.D, Zindices_d.D + /// + /// codegenarm64test: + /// IF_SVE_EB_1A DUP ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// IF_SVE_CB_2A DUP ., + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V1, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_8BYTE, REG_V4, REG_SP, INS_OPTS_SCALABLE_D); + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svuint32_t svdup_lane[_u32](svuint32_t data, uint32_t index) + /// DUP Zresult.S, Zdata.S[index] + /// TBL Zresult.S, Zdata.S, Zindex.S + /// svuint32_t svdupq_lane[_u32](svuint32_t data, uint64_t index) + /// DUP Zresult.Q, Zdata.Q[index] + /// TBL Zresult.D, Zdata.D, Zindices_d.D + /// + /// codegenarm64test: + /// IF_SVE_EB_1A DUP ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// IF_SVE_CB_2A DUP ., + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V1, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_8BYTE, REG_V4, REG_SP, INS_OPTS_SCALABLE_D); + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svuint64_t svdup_lane[_u64](svuint64_t data, uint64_t index) + /// DUP Zresult.D, Zdata.D[index] + /// TBL Zresult.D, Zdata.D, Zindex.D + /// svuint64_t svdupq_lane[_u64](svuint64_t data, uint64_t index) + /// DUP Zresult.Q, Zdata.Q[index] + /// TBL Zresult.D, Zdata.D, Zindices_d.D + /// + /// codegenarm64test: + /// IF_SVE_EB_1A DUP ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// IF_SVE_CB_2A DUP ., + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V1, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_8BYTE, REG_V4, REG_SP, INS_OPTS_SCALABLE_D); + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + /// + /// svfloat32_t svdup_lane[_f32](svfloat32_t data, uint32_t index) + /// DUP Zresult.S, Zdata.S[index] + /// TBL Zresult.S, Zdata.S, Zindex.S + /// svfloat32_t svdupq_lane[_f32](svfloat32_t data, uint64_t index) + /// DUP Zresult.Q, Zdata.Q[index] + /// TBL Zresult.D, Zdata.D, Zindices_d.D + /// + /// codegenarm64test: + /// IF_SVE_EB_1A DUP ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// IF_SVE_CB_2A DUP ., + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V1, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_8BYTE, REG_V4, REG_SP, INS_OPTS_SCALABLE_D); + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// </summary>
+ public static unsafe Vector<float> DuplicateSelectedScalarToVector(Vector<float> data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index);
+
+ /// <summary>
+ /// svfloat64_t svdup_lane[_f64](svfloat64_t data, uint64_t index)
+ /// DUP Zresult.D, Zdata.D[index]
+ /// TBL Zresult.D, Zdata.D, Zindex.D
+ /// svfloat64_t svdupq_lane[_f64](svfloat64_t data, uint64_t index)
+ /// DUP Zresult.Q, Zdata.Q[index]
+ /// TBL Zresult.D, Zdata.D, Zindices_d.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_EB_1A DUP <Zd>.<T>, #<imm>{, <shift>}
+ /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT);
+ /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_CB_2A DUP <Zd>.<T>, <R><n|SP>
+ /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V0, REG_R1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V2, REG_R3, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V1, REG_R5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R(INS_sve_dup, EA_8BYTE, REG_V4, REG_SP, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_BZ_3A TBL <Zd>.<T>, {<Zn>.<T>}, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_BZ_3A_A TBL <Zd>.<T>, {<Zn1>.<T>, <Zn2>.<T>}, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// </summary>
+ public static unsafe Vector<double> DuplicateSelectedScalarToVector(Vector<double> data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index);
+
+
+ /// ExtractAfterLastScalar : Extract element after last
+
+ /// <summary>
+ /// int8_t svlasta[_s8](svbool_t pg, svint8_t op)
+ /// LASTA Wresult, Pg, Zop.B
+ /// LASTA Bresult, Pg, Zop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_CR_3A LASTA <V><d>, <Pg>, <Zn>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR);
+ /// IF_SVE_CS_3A LASTA <R><d>, <Pg>, <Zn>.<T>
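+
+ // Usage sketch for the DuplicateSelectedScalarToVector group above (illustrative
+ // only): broadcasts the element picked by the constant index to every lane, as
+ // DUP Zresult.D, Zdata.D[index] would. 'data' is assumed caller-supplied.
+ //
+ //     Vector<double> splat = Sve.DuplicateSelectedScalarToVector(data, 0);
+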
+ /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe sbyte ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// int16_t svlasta[_s16](svbool_t pg, svint16_t op) + /// LASTA Wresult, Pg, Zop.H + /// LASTA Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe short ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// int32_t svlasta[_s32](svbool_t pg, svint32_t op) + /// LASTA Wresult, Pg, Zop.S + /// LASTA Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe int ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// int64_t svlasta[_s64](svbool_t pg, svint64_t op) + /// LASTA Xresult, Pg, Zop.D + /// LASTA Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe long ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// uint8_t svlasta[_u8](svbool_t pg, svuint8_t op) + /// LASTA Wresult, Pg, Zop.B + /// LASTA Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe byte ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// uint16_t svlasta[_u16](svbool_t pg, svuint16_t op) + /// LASTA Wresult, Pg, Zop.H + /// LASTA Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe ushort ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// uint32_t svlasta[_u32](svbool_t pg, svuint32_t op) + /// LASTA Wresult, Pg, Zop.S + /// LASTA Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe uint ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// uint64_t svlasta[_u64](svbool_t pg, svuint64_t op) + /// LASTA Xresult, Pg, Zop.D + /// LASTA Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe ulong ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// float32_t svlasta[_f32](svbool_t pg, svfloat32_t op) + /// LASTA Wresult, Pg, Zop.S + /// LASTA Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe float ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + /// + /// float64_t svlasta[_f64](svbool_t pg, svfloat64_t op) + /// LASTA Xresult, Pg, Zop.D + /// LASTA Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe double ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + + /// ExtractAfterLastVector : Extract element after last + + /// + /// int8_t svlasta[_s8](svbool_t pg, svint8_t op) + /// LASTA Wresult, Pg, Zop.B + /// LASTA Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// int16_t svlasta[_s16](svbool_t pg, svint16_t op) + /// LASTA Wresult, Pg, Zop.H + /// LASTA Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// int32_t svlasta[_s32](svbool_t pg, svint32_t op) + /// LASTA Wresult, Pg, Zop.S + /// LASTA Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . 
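+
+ // Usage sketch for the ExtractAfterLastScalar group above (illustrative only):
+ // with the embedded all-true arg1 mask predicate, LASTA yields the element one
+ // past the last active lane, wrapping to element 0 when every lane is active.
+ //
+ //     double afterLast = Sve.ExtractAfterLastScalar(values);
+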
+ /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// int64_t svlasta[_s64](svbool_t pg, svint64_t op) + /// LASTA Xresult, Pg, Zop.D + /// LASTA Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// uint8_t svlasta[_u8](svbool_t pg, svuint8_t op) + /// LASTA Wresult, Pg, Zop.B + /// LASTA Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// uint16_t svlasta[_u16](svbool_t pg, svuint16_t op) + /// LASTA Wresult, Pg, Zop.H + /// LASTA Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// uint32_t svlasta[_u32](svbool_t pg, svuint32_t op) + /// LASTA Wresult, Pg, Zop.S + /// LASTA Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// uint64_t svlasta[_u64](svbool_t pg, svuint64_t op) + /// LASTA Xresult, Pg, Zop.D + /// LASTA Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// float32_t svlasta[_f32](svbool_t pg, svfloat32_t op) + /// LASTA Wresult, Pg, Zop.S + /// LASTA Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + /// + /// float64_t svlasta[_f64](svbool_t pg, svfloat64_t op) + /// LASTA Xresult, Pg, Zop.D + /// LASTA Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + + /// ExtractLastScalar : Extract last element + + /// + /// int8_t svlastb[_s8](svbool_t pg, svint8_t op) + /// LASTB Wresult, Pg, Zop.B + /// LASTB Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . 
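+
+ // Usage sketch for the ExtractAfterLastVector group above (illustrative only):
+ // the same LASTA selection as the scalar form, returned in vector form.
+ //
+ //     Vector<float> afterLast = Sve.ExtractAfterLastVector(values);
+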
+ /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe sbyte ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// int16_t svlastb[_s16](svbool_t pg, svint16_t op) + /// LASTB Wresult, Pg, Zop.H + /// LASTB Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe short ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// int32_t svlastb[_s32](svbool_t pg, svint32_t op) + /// LASTB Wresult, Pg, Zop.S + /// LASTB Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe int ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// int64_t svlastb[_s64](svbool_t pg, svint64_t op) + /// LASTB Xresult, Pg, Zop.D + /// LASTB Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe long ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// uint8_t svlastb[_u8](svbool_t pg, svuint8_t op) + /// LASTB Wresult, Pg, Zop.B + /// LASTB Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe byte ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// uint16_t svlastb[_u16](svbool_t pg, svuint16_t op) + /// LASTB Wresult, Pg, Zop.H + /// LASTB Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe ushort ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// uint32_t svlastb[_u32](svbool_t pg, svuint32_t op) + /// LASTB Wresult, Pg, Zop.S + /// LASTB Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe uint ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// uint64_t svlastb[_u64](svbool_t pg, svuint64_t op) + /// LASTB Xresult, Pg, Zop.D + /// LASTB Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe ulong ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// float32_t svlastb[_f32](svbool_t pg, svfloat32_t op) + /// LASTB Wresult, Pg, Zop.S + /// LASTB Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe float ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + /// + /// float64_t svlastb[_f64](svbool_t pg, svfloat64_t op) + /// LASTB Xresult, Pg, Zop.D + /// LASTB Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe double ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + + /// ExtractLastVector : Extract last element + + /// + /// int8_t svlastb[_s8](svbool_t pg, svint8_t op) + /// LASTB Wresult, Pg, Zop.B + /// LASTB Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// int16_t svlastb[_s16](svbool_t pg, svint16_t op) + /// LASTB Wresult, Pg, Zop.H + /// LASTB Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// int32_t svlastb[_s32](svbool_t pg, svint32_t op) + /// LASTB Wresult, Pg, Zop.S + /// LASTB Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . 
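+
+ // Usage sketch for the ExtractLastScalar group above (illustrative only): LASTB
+ // returns the last active element; with the embedded all-true arg1 mask predicate
+ // that is the final element of the vector.
+ //
+ //     long last = Sve.ExtractLastScalar(values);
+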
+ /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// int64_t svlastb[_s64](svbool_t pg, svint64_t op) + /// LASTB Xresult, Pg, Zop.D + /// LASTB Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// uint8_t svlastb[_u8](svbool_t pg, svuint8_t op) + /// LASTB Wresult, Pg, Zop.B + /// LASTB Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// uint16_t svlastb[_u16](svbool_t pg, svuint16_t op) + /// LASTB Wresult, Pg, Zop.H + /// LASTB Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// uint32_t svlastb[_u32](svbool_t pg, svuint32_t op) + /// LASTB Wresult, Pg, Zop.S + /// LASTB Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// uint64_t svlastb[_u64](svbool_t pg, svuint64_t op) + /// LASTB Xresult, Pg, Zop.D + /// LASTB Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// float32_t svlastb[_f32](svbool_t pg, svfloat32_t op) + /// LASTB Wresult, Pg, Zop.S + /// LASTB Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + /// + /// float64_t svlastb[_f64](svbool_t pg, svfloat64_t op) + /// LASTB Xresult, Pg, Zop.D + /// LASTB Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<double> ExtractLastVector(Vector<double> value) => ExtractLastVector(value);
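+
+ // Usage sketch for the ExtractLastVector group above (illustrative only): the
+ // LASTB element as in ExtractLastScalar, but returned in vector form.
+ //
+ //     Vector<double> last = Sve.ExtractLastVector(values);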
+
+
+ /// ExtractVector : Extract vector from pair of vectors
+
+ /// <summary>
+ /// svint8_t svext[_s8](svint8_t op1, svint8_t op2, uint64_t imm3)
+ /// EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3
+ /// MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3
+ ///
+ /// codegenarm64test:
+ /// sve_ext - not implemented in coreclr
+ /// </summary>
+ public static unsafe Vector<sbyte> ExtractVector(Vector<sbyte> upper, Vector<sbyte> lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index);
+
+ /// <summary>
+ /// svint16_t svext[_s16](svint16_t op1, svint16_t op2, uint64_t imm3)
+ /// EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 2
+ /// MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 2
+ ///
+ /// codegenarm64test:
+ /// sve_ext - not implemented in coreclr
+ /// </summary>
+ public static unsafe Vector<short> ExtractVector(Vector<short> upper, Vector<short> lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index);
+
+ /// <summary>
+ /// svint32_t svext[_s32](svint32_t op1, svint32_t op2, uint64_t imm3)
+ /// EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 4
+ /// MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 4
+ ///
+ /// codegenarm64test:
+ /// sve_ext - not implemented in coreclr
+ /// </summary>
+ public static unsafe Vector<int> ExtractVector(Vector<int> upper, Vector<int> lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index);
+
+ /// <summary>
+ /// svint64_t svext[_s64](svint64_t op1, svint64_t op2, uint64_t imm3)
+ /// EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 8
+ /// MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 8
+ ///
+ /// codegenarm64test:
+ /// sve_ext - not implemented in coreclr
+ /// </summary>
+ public static unsafe Vector<long> ExtractVector(Vector<long> upper, Vector<long> lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index);
+
+ /// <summary>
+ /// svuint8_t svext[_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3)
+ /// EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3
+ /// MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3
+ ///
+ /// codegenarm64test:
+ /// sve_ext - not implemented in coreclr
+ /// </summary>
+ public static unsafe Vector<byte> ExtractVector(Vector<byte> upper, Vector<byte> lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index);
+
+ /// <summary>
+ /// svuint16_t svext[_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3)
+ /// EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 2
+ /// MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 2
+ ///
+ /// codegenarm64test:
+ /// sve_ext - not implemented in coreclr
+ /// </summary>
+ public static unsafe Vector<ushort> ExtractVector(Vector<ushort> upper, Vector<ushort> lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index);
+
+ /// <summary>
+ /// svuint32_t svext[_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3)
+ /// EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 4
+ /// MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 4
+ ///
+ /// codegenarm64test:
+ /// sve_ext - not implemented in coreclr
+ /// </summary>
+ public static unsafe Vector<uint> ExtractVector(Vector<uint> upper, Vector<uint> lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index);
+
+ /// <summary>
+ /// svuint64_t svext[_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3)
+ /// EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 8
+ /// MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 8
+ ///
+ /// codegenarm64test:
+ /// sve_ext - not implemented in coreclr
+ /// </summary>
+ public static unsafe Vector<ulong> ExtractVector(Vector<ulong> upper, Vector<ulong> lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index);
+
+ /// <summary>
+ /// svfloat32_t svext[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm3)
+ /// EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 4
+ /// MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 4
+ ///
+ /// codegenarm64test:
+ /// sve_ext - not implemented in coreclr
+ /// </summary>
+ public static unsafe Vector<float> ExtractVector(Vector<float> upper, Vector<float> lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index);
+
+ /// <summary>
+ /// svfloat64_t svext[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm3)
+ /// EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 8
+ /// MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 8
+ ///
+ /// codegenarm64test:
+ /// sve_ext - not implemented in coreclr
+ /// </summary>
+ public static unsafe Vector<double> ExtractVector(Vector<double> upper, Vector<double> lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index);
+
+
+ /// FloatingPointExponentialAccelerator : Floating-point exponential accelerator
+
+ /// <summary>
+ /// svfloat32_t svexpa[_f32](svuint32_t op)
+ /// FEXPA Zresult.S, Zop.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BJ_2A FEXPA <Zd>.<T>, <Zn>.<T>
+ /// theEmitter->emitIns_R_R(INS_sve_fexpa, EA_SCALABLE, REG_V0, REG_V1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R(INS_sve_fexpa, EA_SCALABLE, REG_V3, REG_V0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R(INS_sve_fexpa, EA_SCALABLE, REG_V1, REG_V0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<float> FloatingPointExponentialAccelerator(Vector<uint> value) => FloatingPointExponentialAccelerator(value);
+
+ /// <summary>
+ /// svfloat64_t svexpa[_f64](svuint64_t op)
+ /// FEXPA Zresult.D, Zop.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BJ_2A FEXPA <Zd>.<T>, <Zn>.<T>
+ /// theEmitter->emitIns_R_R(INS_sve_fexpa, EA_SCALABLE, REG_V0, REG_V1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R(INS_sve_fexpa, EA_SCALABLE, REG_V3, REG_V0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R(INS_sve_fexpa, EA_SCALABLE, REG_V1, REG_V0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<double> FloatingPointExponentialAccelerator(Vector<ulong> value) => FloatingPointExponentialAccelerator(value);
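+
+ // Usage sketches for the two groups above (illustrative only). ExtractVector
+ // extracts a vector from the concatenated pair 'upper:lower' starting at the
+ // constant element index (EXT); FloatingPointExponentialAccelerator maps an
+ // unsigned integer bit pattern to a floating-point exponential estimate (FEXPA).
+ //
+ //     Vector<int> window = Sve.ExtractVector(upper, lower, 1);
+ //     Vector<float> exp = Sve.FloatingPointExponentialAccelerator(bits);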
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fmla, EA_SCALABLE, REG_V0, REG_P0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_GU_3A FMLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3B FMLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V1, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V3, REG_V2, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right) => FusedMultiplyAdd(addend, left, right); + + /// + /// svfloat64_t svmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// FMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; FMLA Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svfloat64_t svmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// FMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// FMAD Ztied2.D, Pg/M, Zop3.D, Zop1.D + /// FMAD Ztied3.D, Pg/M, Zop2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; FMLA Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svfloat64_t svmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMLA Zresult.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMAD Zresult.D, Pg/M, Zop3.D, Zop1.D + /// MOVPRFX Zresult.D, Pg/Z, Zop3.D; FMAD Zresult.D, Pg/M, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FMLA ., /M, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fmla, EA_SCALABLE, REG_V0, REG_P0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_GU_3A FMLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3B FMLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V1, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V3, REG_V2, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right) => FusedMultiplyAdd(addend, left, right); + + + /// FusedMultiplyAddBySelectedScalar : Multiply-add, addend first + + /// + /// svfloat32_t svmla_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index) + /// FMLA Ztied1.S, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; FMLA Zresult.S, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FMLA ., /M, ., . 
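+ /// Usage sketch (illustrative only, not from codegenarm64test; rightIndex must be a JIT-time constant lane number):
+ /// // r[i] = acc[i] + a[i] * b[0], i.e. lane 0 of 'b' is broadcast into the multiply
+ /// Vector<float> r = Sve.FusedMultiplyAddBySelectedScalar(acc, a, b, 0);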
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fmla, EA_SCALABLE, REG_V0, REG_P0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_GU_3A FMLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3B FMLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V1, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V3, REG_V2, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svfloat64_t svmla_lane[_f64](svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_index) + /// FMLA Ztied1.D, Zop2.D, Zop3.D[imm_index] + /// MOVPRFX Zresult, Zop1; FMLA Zresult.D, Zop2.D, Zop3.D[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FMLA ., /M, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fmla, EA_SCALABLE, REG_V0, REG_P0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_GU_3A FMLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3B FMLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V1, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V3, REG_V2, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + + /// FusedMultiplyAddNegated : Negated multiply-add, addend first + + /// + /// svfloat32_t svnmla[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// FNMLA Ztied1.S, Pg/M, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; FNMLA Zresult.S, Pg/M, Zop2.S, Zop3.S + /// svfloat32_t svnmla[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// FNMLA Ztied1.S, Pg/M, Zop2.S, Zop3.S + /// FNMAD Ztied2.S, Pg/M, Zop3.S, Zop1.S + /// FNMAD Ztied3.S, Pg/M, Zop2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; FNMLA Zresult.S, Pg/M, Zop2.S, Zop3.S + /// svfloat32_t svnmla[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FNMLA Zresult.S, Pg/M, Zop2.S, Zop3.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; FNMAD Zresult.S, Pg/M, Zop3.S, Zop1.S + /// MOVPRFX Zresult.S, Pg/Z, Zop3.S; FNMAD Zresult.S, Pg/M, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FNMLA ., /M, ., . 
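+ /// Usage sketch (illustrative only, not from codegenarm64test; follows the svnmla semantics above):
+ /// // r[i] = -(acc[i] + a[i] * b[i]) for each active lane
+ /// Vector<float> r = Sve.FusedMultiplyAddNegated(acc, a, b);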
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fnmla, EA_SCALABLE, REG_V6, REG_P4, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right) => FusedMultiplyAddNegated(addend, left, right); + + /// + /// svfloat64_t svnmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// FNMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; FNMLA Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svfloat64_t svnmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// FNMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// FNMAD Ztied2.D, Pg/M, Zop3.D, Zop1.D + /// FNMAD Ztied3.D, Pg/M, Zop2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; FNMLA Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svfloat64_t svnmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FNMLA Zresult.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; FNMAD Zresult.D, Pg/M, Zop3.D, Zop1.D + /// MOVPRFX Zresult.D, Pg/Z, Zop3.D; FNMAD Zresult.D, Pg/M, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FNMLA ., /M, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fnmla, EA_SCALABLE, REG_V6, REG_P4, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right) => FusedMultiplyAddNegated(addend, left, right); + + + /// FusedMultiplySubtract : Multiply-subtract, minuend first + + /// + /// svfloat32_t svmls[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// FMLS Ztied1.S, Pg/M, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; FMLS Zresult.S, Pg/M, Zop2.S, Zop3.S + /// svfloat32_t svmls[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// FMLS Ztied1.S, Pg/M, Zop2.S, Zop3.S + /// FMSB Ztied2.S, Pg/M, Zop3.S, Zop1.S + /// FMSB Ztied3.S, Pg/M, Zop2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; FMLS Zresult.S, Pg/M, Zop2.S, Zop3.S + /// svfloat32_t svmls[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMLS Zresult.S, Pg/M, Zop2.S, Zop3.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMSB Zresult.S, Pg/M, Zop3.S, Zop1.S + /// MOVPRFX Zresult.S, Pg/Z, Zop3.S; FMSB Zresult.S, Pg/M, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FMLS ., /M, ., . 
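+ /// Usage sketch (illustrative only, not from codegenarm64test):
+ /// Vector<float> m = ..., a = ..., b = ...;
+ /// // r[i] = m[i] - a[i] * b[i] for each active lane
+ /// Vector<float> r = Sve.FusedMultiplySubtract(m, a, b);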
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fmls, EA_SCALABLE, REG_V3, REG_P2, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3A FMLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3B FMLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V5, REG_V4, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V7, REG_V6, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right) => FusedMultiplySubtract(minuend, left, right); + + /// + /// svfloat64_t svmls[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// FMLS Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; FMLS Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svfloat64_t svmls[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// FMLS Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// FMSB Ztied2.D, Pg/M, Zop3.D, Zop1.D + /// FMSB Ztied3.D, Pg/M, Zop2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; FMLS Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svfloat64_t svmls[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMLS Zresult.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMSB Zresult.D, Pg/M, Zop3.D, Zop1.D + /// MOVPRFX Zresult.D, Pg/Z, Zop3.D; FMSB Zresult.D, Pg/M, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FMLS ., /M, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fmls, EA_SCALABLE, REG_V3, REG_P2, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3A FMLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3B FMLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V5, REG_V4, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V7, REG_V6, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right) => FusedMultiplySubtract(minuend, left, right); + + + /// FusedMultiplySubtractBySelectedScalar : Multiply-subtract, minuend first + + /// + /// svfloat32_t svmls_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index) + /// FMLS Ztied1.S, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; FMLS Zresult.S, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FMLS ., /M, ., . 
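+ /// Usage sketch (illustrative only, not from codegenarm64test; rightIndex must be a JIT-time constant lane number):
+ /// // r[i] = m[i] - a[i] * b[1]
+ /// Vector<float> r = Sve.FusedMultiplySubtractBySelectedScalar(m, a, b, 1);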
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fmls, EA_SCALABLE, REG_V3, REG_P2, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3A FMLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3B FMLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V5, REG_V4, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V7, REG_V6, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); + + /// + /// svfloat64_t svmls_lane[_f64](svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_index) + /// FMLS Ztied1.D, Zop2.D, Zop3.D[imm_index] + /// MOVPRFX Zresult, Zop1; FMLS Zresult.D, Zop2.D, Zop3.D[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FMLS ., /M, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fmls, EA_SCALABLE, REG_V3, REG_P2, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3A FMLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3B FMLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V5, REG_V4, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V7, REG_V6, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); + + + /// FusedMultiplySubtractNegated : Negated multiply-subtract, minuend first + + /// + /// svfloat32_t svnmls[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// FNMLS Ztied1.S, Pg/M, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; FNMLS Zresult.S, Pg/M, Zop2.S, Zop3.S + /// svfloat32_t svnmls[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// FNMLS Ztied1.S, Pg/M, Zop2.S, Zop3.S + /// FNMSB Ztied2.S, Pg/M, Zop3.S, Zop1.S + /// FNMSB Ztied3.S, Pg/M, Zop2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; FNMLS Zresult.S, Pg/M, Zop2.S, Zop3.S + /// svfloat32_t svnmls[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FNMLS Zresult.S, Pg/M, Zop2.S, Zop3.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; FNMSB Zresult.S, Pg/M, Zop3.S, Zop1.S + /// MOVPRFX Zresult.S, Pg/Z, Zop3.S; FNMSB Zresult.S, Pg/M, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FNMLS ., /M, ., . 
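+ /// Usage sketch (illustrative only, not from codegenarm64test; follows the svnmls semantics above):
+ /// // r[i] = a[i] * b[i] - m[i], i.e. -(m[i] - a[i] * b[i])
+ /// Vector<float> r = Sve.FusedMultiplySubtractNegated(m, a, b);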
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fnmls, EA_SCALABLE, REG_V9, REG_P6, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right) => FusedMultiplySubtractNegated(minuend, left, right); + + /// + /// svfloat64_t svnmls[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// FNMLS Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; FNMLS Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svfloat64_t svnmls[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// FNMLS Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// FNMSB Ztied2.D, Pg/M, Zop3.D, Zop1.D + /// FNMSB Ztied3.D, Pg/M, Zop2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; FNMLS Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svfloat64_t svnmls[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FNMLS Zresult.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; FNMSB Zresult.D, Pg/M, Zop3.D, Zop1.D + /// MOVPRFX Zresult.D, Pg/Z, Zop3.D; FNMSB Zresult.D, Pg/M, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FNMLS ., /M, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fnmls, EA_SCALABLE, REG_V9, REG_P6, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right) => FusedMultiplySubtractNegated(minuend, left, right); + + + /// GatherPrefetch16Bit : Prefetch halfwords + + /// + /// void svprfh_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// PRFH op, Pg, [Xbase, Zindices.S, SXTW #1] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFH , , [, .S, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFH , , [, .D, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFH , , [, .D, LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFH , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFH , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, 0); + /// IF_SVE_IB_3A PRFH , , [, , LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R7, REG_R8, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + /// + /// void svprfh_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// PRFH op, Pg, [Xbase, Zindices.D, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFH , , [, .S, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFH , , [, .D, #1] 
+ /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFH , , [, .D, LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFH , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFH , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, 0); + /// IF_SVE_IB_3A PRFH , , [, , LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R7, REG_R8, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + /// + /// void svprfh_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// PRFH op, Pg, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFH , , [, .S, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFH , , [, .D, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFH , , [, .D, LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFH , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFH , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, 0); + /// IF_SVE_IB_3A PRFH , , [, , LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R7, REG_R8, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, addresses, prefetchType); + + /// + /// void svprfh_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// PRFH op, Pg, [Xbase, Zindices.S, UXTW #1] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFH , , [, .S, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFH , , [, .D, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFH , , [, .D, LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFH , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// 
theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFH , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, 0); + /// IF_SVE_IB_3A PRFH , , [, , LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R7, REG_R8, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + /// + /// void svprfh_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// PRFH op, Pg, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFH , , [, .S, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFH , , [, .D, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFH , , [, .D, LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFH , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFH , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, 0); + /// IF_SVE_IB_3A PRFH , , [, , LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R7, REG_R8, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, addresses, prefetchType); + + /// + /// void svprfh_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// PRFH op, Pg, [Xbase, Zindices.D, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFH , , [, .S, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFH , , [, .D, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFH , , [, .D, LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFH , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFH , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, 0); + /// IF_SVE_IB_3A PRFH , , [, , LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R7, REG_R8, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void 
GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + /// + /// void svprfh_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// PRFH op, Pg, [Xbase, Zindices.S, SXTW #1] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFH , , [, .S, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFH , , [, .D, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFH , , [, .D, LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFH , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFH , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, 0); + /// IF_SVE_IB_3A PRFH , , [, , LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R7, REG_R8, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + /// + /// void svprfh_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// PRFH op, Pg, [Xbase, Zindices.D, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFH , , [, .S, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFH , , [, .D, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFH , , [, .D, LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFH , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFH , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, 0); + /// IF_SVE_IB_3A PRFH , , [, , LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R7, REG_R8, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + /// + /// void svprfh_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// PRFH op, Pg, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFH , , [, .S, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, 
SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFH , , [, .D, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFH , , [, .D, LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFH , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFH , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, 0); + /// IF_SVE_IB_3A PRFH , , [, , LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R7, REG_R8, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, addresses, prefetchType); + + /// + /// void svprfh_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// PRFH op, Pg, [Xbase, Zindices.S, UXTW #1] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFH , , [, .S, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFH , , [, .D, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFH , , [, .D, LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFH , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFH , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, 0); + /// IF_SVE_IB_3A PRFH , , [, , LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R7, REG_R8, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + /// + /// void svprfh_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// PRFH op, Pg, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFH , , [, .S, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFH , , [, .D, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFH , , [, .D, LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFH , , [.S{, #}] + /// 
theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFH , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, 0); + /// IF_SVE_IB_3A PRFH , , [, , LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R7, REG_R8, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, addresses, prefetchType); + + /// + /// void svprfh_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// PRFH op, Pg, [Xbase, Zindices.D, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFH , , [, .S, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFH , , [, .D, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFH , , [, .D, LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFH , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFH , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, 0); + /// IF_SVE_IB_3A PRFH , , [, , LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R7, REG_R8, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch16Bit(mask, address, indices, prefetchType); + + + /// GatherPrefetch32Bit : Prefetch words + + /// + /// void svprfw_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// PRFW op, Pg, [Xbase, Zindices.S, SXTW #2] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFW , , [, .S, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFW , , [, .D, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFW , , [, .D, LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P3, REG_R2, REG_V1, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFW , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFW , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R5, -32); + /// IF_SVE_IB_3A 
PRFW , , [, , LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R1, REG_R9, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, address, indices, prefetchType); + + /// + /// void svprfw_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// PRFW op, Pg, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFW , , [, .S, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFW , , [, .D, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFW , , [, .D, LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P3, REG_R2, REG_V1, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFW , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFW , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R5, -32); + /// IF_SVE_IB_3A PRFW , , [, , LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R1, REG_R9, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, address, indices, prefetchType); + + /// + /// void svprfw_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// PRFW op, Pg, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFW , , [, .S, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFW , , [, .D, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFW , , [, .D, LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P3, REG_R2, REG_V1, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFW , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFW , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R5, -32); + /// IF_SVE_IB_3A PRFW , , [, , LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R1, REG_R9, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, addresses, prefetchType); + + /// + /// void svprfw_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// PRFW op, Pg, 
[Xbase, Zindices.S, UXTW #2] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFW , , [, .S, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFW , , [, .D, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFW , , [, .D, LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P3, REG_R2, REG_V1, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFW , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFW , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R5, -32); + /// IF_SVE_IB_3A PRFW , , [, , LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R1, REG_R9, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, address, indices, prefetchType); + + /// + /// void svprfw_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// PRFW op, Pg, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFW , , [, .S, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFW , , [, .D, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFW , , [, .D, LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P3, REG_R2, REG_V1, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFW , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFW , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R5, -32); + /// IF_SVE_IB_3A PRFW , , [, , LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R1, REG_R9, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, addresses, prefetchType); + + /// + /// void svprfw_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// PRFW op, Pg, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFW , , [, .S, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFW , , [, .D, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFW , , [, .D, LSL #2] + /// 
theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P3, REG_R2, REG_V1, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFW , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFW , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R5, -32); + /// IF_SVE_IB_3A PRFW , , [, , LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R1, REG_R9, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, address, indices, prefetchType); + + /// + /// void svprfw_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// PRFW op, Pg, [Xbase, Zindices.S, SXTW #2] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFW , , [, .S, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFW , , [, .D, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFW , , [, .D, LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P3, REG_R2, REG_V1, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFW , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFW , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R5, -32); + /// IF_SVE_IB_3A PRFW , , [, , LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R1, REG_R9, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, address, indices, prefetchType); + + /// + /// void svprfw_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// PRFW op, Pg, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFW , , [, .S, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFW , , [, .D, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFW , , [, .D, LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P3, REG_R2, REG_V1, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFW , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_D); + /// 
IF_SVE_IA_2A PRFW , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R5, -32); + /// IF_SVE_IB_3A PRFW , , [, , LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R1, REG_R9, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, address, indices, prefetchType); + + /// + /// void svprfw_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// PRFW op, Pg, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFW , , [, .S, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFW , , [, .D, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFW , , [, .D, LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P3, REG_R2, REG_V1, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFW , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFW , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R5, -32); + /// IF_SVE_IB_3A PRFW , , [, , LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R1, REG_R9, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, addresses, prefetchType); + + /// + /// void svprfw_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// PRFW op, Pg, [Xbase, Zindices.S, UXTW #2] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFW , , [, .S, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFW , , [, .D, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFW , , [, .D, LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P3, REG_R2, REG_V1, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFW , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFW , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R5, -32); + /// IF_SVE_IB_3A PRFW , , [, , LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R1, REG_R9, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => 
GatherPrefetch32Bit(mask, address, indices, prefetchType); + + /// + /// void svprfw_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// PRFW op, Pg, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFW , , [, .S, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFW , , [, .D, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFW , , [, .D, LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P3, REG_R2, REG_V1, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFW , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFW , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R5, -32); + /// IF_SVE_IB_3A PRFW , , [, , LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R1, REG_R9, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, addresses, prefetchType); + + /// + /// void svprfw_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// PRFW op, Pg, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFW , , [, .S, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFW , , [, .D, #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R2, REG_V1, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFW , , [, .D, LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P3, REG_R2, REG_V1, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFW , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_V5, 124, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFW , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R5, -32); + /// IF_SVE_IB_3A PRFW , , [, , LSL #2] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfw, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R1, REG_R9, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch32Bit(mask, address, indices, prefetchType); + + + /// GatherPrefetch64Bit : Prefetch doublewords + + /// + /// void svprfd_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// PRFD op, Pg, [Xbase, Zindices.S, SXTW #3] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFD , , [, .S, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A 
PRFD , , [, .D, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFD , , [, .D, LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, REG_V3, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFD , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFD , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, 31); + /// IF_SVE_IB_3A PRFD , , [, , LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P5, REG_R4, REG_R3, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, address, indices, prefetchType); + + /// + /// void svprfd_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// PRFD op, Pg, [Xbase, Zindices.D, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFD , , [, .S, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFD , , [, .D, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFD , , [, .D, LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, REG_V3, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFD , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFD , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, 31); + /// IF_SVE_IB_3A PRFD , , [, , LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P5, REG_R4, REG_R3, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, address, indices, prefetchType); + + /// + /// void svprfd_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// PRFD op, Pg, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFD , , [, .S, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFD , , [, .D, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFD , , [, .D, LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, REG_V3, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFD , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 
248, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFD , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, 31); + /// IF_SVE_IB_3A PRFD , , [, , LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P5, REG_R4, REG_R3, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, addresses, prefetchType); + + /// + /// void svprfd_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// PRFD op, Pg, [Xbase, Zindices.S, UXTW #3] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFD , , [, .S, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFD , , [, .D, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFD , , [, .D, LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, REG_V3, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFD , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFD , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, 31); + /// IF_SVE_IB_3A PRFD , , [, , LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P5, REG_R4, REG_R3, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, address, indices, prefetchType); + + /// + /// void svprfd_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// PRFD op, Pg, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFD , , [, .S, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFD , , [, .D, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFD , , [, .D, LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, REG_V3, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFD , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFD , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, 31); + /// IF_SVE_IB_3A PRFD , , [, , LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P5, REG_R4, REG_R3, INS_SCALABLE_OPTS_LSL_N); + /// + 
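+ /// Usage sketch (illustrative only, not from codegenarm64test; the SvePrefetchType member name is an assumption):
+ /// // 'addresses' holds one 64-bit base address per lane; active lanes are
+ /// // hinted into the cache ahead of a following 64-bit gather load.
+ /// Sve.GatherPrefetch64Bit(mask, addresses, SvePrefetchType.LoadL1Temporal);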
public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, addresses, prefetchType); + + /// + /// void svprfd_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// PRFD op, Pg, [Xbase, Zindices.D, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFD , , [, .S, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFD , , [, .D, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFD , , [, .D, LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, REG_V3, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFD , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFD , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, 31); + /// IF_SVE_IB_3A PRFD , , [, , LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P5, REG_R4, REG_R3, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, address, indices, prefetchType); + + /// + /// void svprfd_gather_[s32]index(svbool_t pg, const void *base, svint32_t indices, enum svprfop op) + /// PRFD op, Pg, [Xbase, Zindices.S, SXTW #3] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFD , , [, .S, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFD , , [, .D, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFD , , [, .D, LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, REG_V3, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFD , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFD , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, 31); + /// IF_SVE_IB_3A PRFD , , [, , LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P5, REG_R4, REG_R3, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, address, indices, prefetchType); + + /// + /// void svprfd_gather_[s64]index(svbool_t pg, const void *base, svint64_t indices, enum svprfop op) + /// PRFD op, Pg, [Xbase, Zindices.D, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFD , , [, .S, #3] + /// 
theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFD , , [, .D, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFD , , [, .D, LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, REG_V3, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFD , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFD , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, 31); + /// IF_SVE_IB_3A PRFD , , [, , LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P5, REG_R4, REG_R3, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, address, indices, prefetchType); + + /// + /// void svprfd_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op) + /// PRFD op, Pg, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFD , , [, .S, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFD , , [, .D, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFD , , [, .D, LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, REG_V3, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFD , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFD , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, 31); + /// IF_SVE_IB_3A PRFD , , [, , LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P5, REG_R4, REG_R3, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, addresses, prefetchType); + + /// + /// void svprfd_gather_[u32]index(svbool_t pg, const void *base, svuint32_t indices, enum svprfop op) + /// PRFD op, Pg, [Xbase, Zindices.S, UXTW #3] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFD , , [, .S, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFD , , [, .D, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFD , , [, .D, LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, REG_V3, INS_SCALABLE_OPTS_LSL_N); + /// 
IF_SVE_HZ_2A_B PRFD , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFD , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, 31); + /// IF_SVE_IB_3A PRFD , , [, , LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P5, REG_R4, REG_R3, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, address, indices, prefetchType); + + /// + /// void svprfd_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op) + /// PRFD op, Pg, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFD , , [, .S, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFD , , [, .D, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFD , , [, .D, LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, REG_V3, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFD , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFD , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, 31); + /// IF_SVE_IB_3A PRFD , , [, , LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P5, REG_R4, REG_R3, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch64Bit(mask, addresses, prefetchType); + + /// + /// void svprfd_gather_[u64]index(svbool_t pg, const void *base, svuint64_t indices, enum svprfop op) + /// PRFD op, Pg, [Xbase, Zindices.D, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFD , , [, .S, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFD , , [, .D, #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFD , , [, .D, LSL #3] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, REG_V3, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFD , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P4, REG_V3, 248, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFD , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfd, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R4, 31); + /// IF_SVE_IB_3A PRFD , , [, , LSL #3] + /// 
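+
+ // Editor's illustrative sketch (not part of the generated API dump): one way the
+ // GatherPrefetch64Bit overloads above could be used, prefetching the next iteration's
+ // elements ahead of the gather itself. SvePrefetchType.LoadL1Temporal and the
+ // CreateTrueMaskInt64 helper are assumed names from elsewhere in this API proposal.
+ private static unsafe Vector<long> GatherWithPrefetchExample(long* data, Vector<long> indices, Vector<long> nextIndices)
+ {
+     Vector<long> mask = Sve.CreateTrueMaskInt64();   // all-lanes-active predicate (assumed helper)
+     // Warm the cache for the next gather: PRFD op, Pg, [Xbase, Zindices.D, LSL #3]
+     Sve.GatherPrefetch64Bit(mask, data, nextIndices, SvePrefetchType.LoadL1Temporal);
+     // Gather the current elements: LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]
+     return Sve.GatherVector(mask, data, indices);
+ }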
+
+
+ /// GatherPrefetch8Bit : Prefetch bytes
+
+ /// <summary>
+ /// void svprfb_gather_[s32]offset(svbool_t pg, const void *base, svint32_t offsets, enum svprfop op)
+ ///   PRFB op, Pg, [Xbase, Zoffsets.S, SXTW]
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_HY_3A   PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1STRM, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL2KEEP, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL2STRM, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL3KEEP, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL3STRM, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PSTL1KEEP, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PSTL1STRM, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PSTL2KEEP, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PSTL2STRM, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PSTL3KEEP, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PSTL3STRM, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_CONST6, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_CONST7, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_CONST14, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_CONST15, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///    IF_SVE_HY_3A_A   PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW);
+ ///    IF_SVE_HY_3B   PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D]
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D);
+ ///    IF_SVE_HZ_2A_B   PRFB <prfop>, <Pg>, [<Zn>.S{, #<imm>}]
+ ///        theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_V2, 0, INS_OPTS_SCALABLE_S);
+ ///        theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_V2, 31, INS_OPTS_SCALABLE_D);
+ ///    IF_SVE_IA_2A   PRFB <prfop>, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///        theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P2, REG_R3, -32);
+ ///        theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P2, REG_R3, 17);
+ ///    IF_SVE_IB_3A   PRFB <prfop>, <Pg>, [<Xn|SP>, <Xm>]
+ ///        theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R1, REG_R2);
+ /// </summary>
+ public static unsafe void GatherPrefetch8Bit(Vector<sbyte> mask, void* address, Vector<int> offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType);
+
+ /// <summary>
+ /// void svprfb_gather_[s64]offset(svbool_t pg, const void *base, svint64_t offsets, enum svprfop op)
+ ///   PRFB op, Pg, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: same PRFB forms as the [s32]offset overload above.
+ /// </summary>
+ public static unsafe void GatherPrefetch8Bit(Vector<sbyte> mask, void* address, Vector<long> offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType);
+
+ /// <summary>
+ /// void svprfb_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op)
+ ///   PRFB op, Pg, [Zbases.S, #0]
+ ///
+ /// codegenarm64test: same PRFB forms as the [s32]offset overload above.
+ /// </summary>
+ public static unsafe void GatherPrefetch8Bit(Vector<sbyte> mask, Vector<uint> addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, addresses, prefetchType);
+
+ /// <summary>
+ /// void svprfb_gather_[u32]offset(svbool_t pg, const void *base, svuint32_t offsets, enum svprfop op)
+ ///   PRFB op, Pg, [Xbase, Zoffsets.S, UXTW]
+ ///
+ /// codegenarm64test: same PRFB forms as the [s32]offset overload above.
+ /// </summary>
+ public static unsafe void GatherPrefetch8Bit(Vector<sbyte> mask, void* address, Vector<uint> offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType);
+
+ /// <summary>
+ /// void svprfb_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op)
+ ///   PRFB op, Pg, [Zbases.D, #0]
+ ///
+ /// codegenarm64test: same PRFB forms as the [s32]offset overload above.
+ /// </summary>
+ public static unsafe void GatherPrefetch8Bit(Vector<sbyte> mask, Vector<ulong> addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, addresses, prefetchType);
+
+ /// <summary>
+ /// void svprfb_gather_[u64]offset(svbool_t pg, const void *base, svuint64_t offsets, enum svprfop op)
+ ///   PRFB op, Pg, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: same PRFB forms as the [s32]offset overload above.
+ /// </summary>
+ public static unsafe void GatherPrefetch8Bit(Vector<sbyte> mask, void* address, Vector<ulong> offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType);
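+
+ // Editor's illustrative sketch: the overloads above take signed (sbyte-mask) forms; the
+ // overloads below repeat the same shapes with a byte mask. Both lower to the same PRFB
+ // encodings. CreateTrueMaskSByte and SvePrefetchType.LoadL1Temporal are assumed names.
+ private static unsafe void PrefetchBytesExample(byte* table, Vector<int> offsets)
+ {
+     Vector<sbyte> mask = Sve.CreateTrueMaskSByte();  // assumed helper
+     // PRFB op, Pg, [Xbase, Zoffsets.S, SXTW]
+     Sve.GatherPrefetch8Bit(mask, table, offsets, SvePrefetchType.LoadL1Temporal);
+ }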
+
+ /// <summary>
+ /// void svprfb_gather_[s32]offset(svbool_t pg, const void *base, svint32_t offsets, enum svprfop op)
+ ///   PRFB op, Pg, [Xbase, Zoffsets.S, SXTW]
+ ///
+ /// codegenarm64test: same PRFB forms as the [s32]offset overload above.
+ /// </summary>
+ public static unsafe void GatherPrefetch8Bit(Vector<byte> mask, void* address, Vector<int> offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType);
+
+ /// <summary>
+ /// void svprfb_gather_[s64]offset(svbool_t pg, const void *base, svint64_t offsets, enum svprfop op)
+ ///   PRFB op, Pg, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: same PRFB forms as the [s32]offset overload above.
+ /// </summary>
+ public static unsafe void GatherPrefetch8Bit(Vector<byte> mask, void* address, Vector<long> offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType);
+
+ /// <summary>
+ /// void svprfb_gather[_u32base](svbool_t pg, svuint32_t bases, enum svprfop op)
+ ///   PRFB op, Pg, [Zbases.S, #0]
+ ///
+ /// codegenarm64test: same PRFB forms as the [s32]offset overload above.
+ /// </summary>
+ public static unsafe void GatherPrefetch8Bit(Vector<byte> mask, Vector<uint> addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, addresses, prefetchType);
+
+ /// <summary>
+ /// void svprfb_gather_[u32]offset(svbool_t pg, const void *base, svuint32_t offsets, enum svprfop op)
+ ///   PRFB op, Pg, [Xbase, Zoffsets.S, UXTW]
+ ///
+ /// codegenarm64test: same PRFB forms as the [s32]offset overload above.
+ /// </summary>
+ public static unsafe void GatherPrefetch8Bit(Vector<byte> mask, void* address, Vector<uint> offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType);
+
+ /// <summary>
+ /// void svprfb_gather[_u64base](svbool_t pg, svuint64_t bases, enum svprfop op)
+ ///   PRFB op, Pg, [Zbases.D, #0]
+ ///
+ /// codegenarm64test: same PRFB forms as the [s32]offset overload above.
+ /// </summary>
+ public static unsafe void GatherPrefetch8Bit(Vector<byte> mask, Vector<ulong> addresses, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, addresses, prefetchType);
+
+ /// <summary>
+ /// void svprfb_gather_[u64]offset(svbool_t pg, const void *base, svuint64_t offsets, enum svprfop op)
+ ///   PRFB op, Pg, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: same PRFB forms as the [s32]offset overload above.
+ /// </summary>
+ public static unsafe void GatherPrefetch8Bit(Vector<byte> mask, void* address, Vector<ulong> offsets, [ConstantExpected] SvePrefetchType prefetchType) => GatherPrefetch8Bit(mask, address, offsets, prefetchType);
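+
+ // Editor's illustrative sketch: the Vector<ulong> "addresses" overloads prefetch through a
+ // vector of absolute addresses (PRFB op, Pg, [Zbases.D, #0]) rather than base-plus-offset
+ // addressing. CreateTrueMaskByte and SvePrefetchType.LoadL1Temporal are assumed names.
+ private static unsafe void PrefetchByAddressExample(Vector<ulong> addresses)
+ {
+     Vector<byte> mask = Sve.CreateTrueMaskByte();    // assumed helper
+     Sve.GatherPrefetch8Bit(mask, addresses, SvePrefetchType.LoadL1Temporal);
+ }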
/// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVector(Vector mask, int* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svint32_t svld1_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// LD1W Zresult.S, Pg/Z, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, 
+
+ /// <summary>
+ /// svint32_t svld1_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+ /// LD1W Zresult.S, Pg/Z, [Zbases.S, #0]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IH_3A_F LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q);
+ /// IF_SVE_HW_4A LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_II_4A_H LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LD1W {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> GatherVector(Vector<int> mask, Vector<uint> addresses) => GatherVector(mask, addresses);
+
+ /// <summary>
+ /// svint32_t svld1_gather_[u32]index[_s32](svbool_t pg, const int32_t *base, svuint32_t indices)
+ /// LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IH_3A_F LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q);
+ /// IF_SVE_HW_4A LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_II_4A_H LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LD1W {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> GatherVector(Vector<int> mask, int* address, Vector<uint> indices) => GatherVector(mask, address, indices);
+
+ /// <summary>
+ /// svint64_t svld1_gather_[s64]index[_s64](svbool_t pg, const
int64_t *base, svint64_t indices) + /// LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVector(Vector mask, long* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svint64_t svld1_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// LD1D Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, 
INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVector(Vector mask, Vector addresses) => GatherVector(mask, addresses); + + /// + /// svint64_t svld1_gather_[u64]index[_s64](svbool_t pg, const int64_t *base, svuint64_t indices) + /// LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVector(Vector mask, long* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svuint32_t svld1_gather_[s32]index[_u32](svbool_t pg, const uint32_t *base, svint32_t indices) + /// LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #2] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, 
INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVector(Vector mask, uint* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svuint32_t svld1_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// LD1W Zresult.S, Pg/Z, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, 
[, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVector(Vector mask, Vector addresses) => GatherVector(mask, addresses); + + /// + /// svuint32_t svld1_gather_[u32]index[_u32](svbool_t pg, const uint32_t *base, svuint32_t indices) + /// LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, 
INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVector(Vector mask, uint* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svuint64_t svld1_gather_[s64]index[_u64](svbool_t pg, const uint64_t *base, svint64_t indices) + /// LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVector(Vector mask, ulong* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svuint64_t svld1_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// LD1D Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// 
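Usage sketch (illustrative only): this overload takes a vector of 64-bit base
+ /// addresses rather than a pointer-plus-indices pair; `addresses` is a
+ /// hypothetical Vector<ulong> of absolute element addresses:
+ ///     Vector<ulong> result = Sve.GatherVector(mask, addresses);
+ ///
+ /// 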
codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVector(Vector mask, Vector addresses) => GatherVector(mask, addresses); + + /// + /// svuint64_t svld1_gather_[u64]index[_u64](svbool_t pg, const uint64_t *base, svuint64_t indices) + /// LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVector(Vector mask, ulong* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svfloat32_t svld1_gather_[s32]index[_f32](svbool_t pg, const float32_t *base, svint32_t indices) + /// LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #2] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector 
GatherVector(Vector mask, float* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svfloat32_t svld1_gather[_u32base]_f32(svbool_t pg, svuint32_t bases) + /// LD1W Zresult.S, Pg/Z, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVector(Vector mask, Vector addresses) => GatherVector(mask, addresses); + + /// + /// svfloat32_t svld1_gather_[u32]index[_f32](svbool_t pg, const float32_t *base, svuint32_t indices) + /// LD1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, 
INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVector(Vector mask, float* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svfloat64_t svld1_gather_[s64]index[_f64](svbool_t pg, const float64_t *base, svint64_t indices) + /// LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, 
EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVector(Vector mask, double* address, Vector indices) => GatherVector(mask, address, indices); + + /// + /// svfloat64_t svld1_gather[_u64base]_f64(svbool_t pg, svuint64_t bases) + /// LD1D Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVector(Vector mask, Vector addresses) => GatherVector(mask, addresses); + + /// + /// svfloat64_t svld1_gather_[u64]index[_f64](svbool_t pg, const float64_t *base, svuint64_t indices) + /// LD1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL 
VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IH_3A_A LD1D {<Zt>.Q }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q);
+ /// IF_SVE_IU_4A LD1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_IU_4A_C LD1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_II_4A LD1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_II_4A_B LD1D {<Zt>.Q }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B LD1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B_D LD1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IV_3A LD1D {<Zt>.D }, <Pg>/Z, [<Zn>.D{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<double> GatherVector(Vector<double> mask, double* address, Vector<ulong> indices) => GatherVector(mask, address, indices);
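+
+ // Usage sketch (illustrative only): gather doubles using unsigned 64-bit indices.
+ // `basePtr` (double*) and `indices` (Vector<ulong>) are hypothetical locals.
+ //
+ //     Vector<double> mask   = Sve.CreateTrueMaskDouble(SveMaskPattern.All);
+ //     Vector<double> result = Sve.GatherVector(mask, basePtr, indices);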
+
+
+ /// GatherVectorByteZeroExtend : Load 8-bit data and zero-extend
+
+ /// <summary>
+ /// svint32_t svld1ub_gather_[s32]offset_s32(svbool_t pg, const uint8_t *base, svint32_t offsets)
+ /// LD1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A_E LD1B {<Zt>.B }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HW_4A LD1B {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_A LD1B {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1B {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IK_4A_H LD1B {<Zt>.B }, <Pg>/Z, [<Xn|SP>, <Xm>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HX_3A_B LD1B {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> GatherVectorByteZeroExtend(Vector<int> mask, byte* address, Vector<int> indices) => GatherVectorByteZeroExtend(mask, address, indices);
+
+ /// <summary>
+ /// svint32_t svld1ub_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+ /// LD1B Zresult.S, Pg/Z, [Zbases.S, #0]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A_E LD1B {<Zt>.B }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HW_4A LD1B {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_A LD1B {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1B {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IK_4A_H LD1B {<Zt>.B }, <Pg>/Z, [<Xn|SP>, <Xm>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HX_3A_B LD1B {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> GatherVectorByteZeroExtend(Vector<int> mask, Vector<uint> addresses) => GatherVectorByteZeroExtend(mask, addresses);
+
+ /// <summary>
+ /// svint32_t svld1ub_gather_[u32]offset_s32(svbool_t pg, const uint8_t *base, svuint32_t offsets)
+ /// LD1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A_E LD1B {<Zt>.B }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_H);
+ 
/// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_H LD1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices) => GatherVectorByteZeroExtend(mask, address, indices); + + /// + /// svint64_t svld1ub_gather_[s64]offset_s64(svbool_t pg, const uint8_t *base, svint64_t offsets) + /// LD1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_E LD1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_H LD1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_H); 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices) => GatherVectorByteZeroExtend(mask, address, indices); + + /// + /// svint64_t svld1ub_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// LD1B Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_E LD1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_H LD1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector addresses) => GatherVectorByteZeroExtend(mask, addresses); + + /// + /// svint64_t svld1ub_gather_[u64]offset_s64(svbool_t pg, const uint8_t *base, svuint64_t offsets) + /// LD1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_E LD1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_S); + /// 
theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HW_4A LD1B {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_A LD1B {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1B {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IK_4A_H LD1B {<Zt>.B }, <Pg>/Z, [<Xn|SP>, <Xm>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HX_3A_B LD1B {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorByteZeroExtend(Vector<long> mask, byte* address, Vector<ulong> indices) => GatherVectorByteZeroExtend(mask, address, indices);
+
+ /// <summary>
+ /// svuint32_t svld1ub_gather_[s32]offset_u32(svbool_t pg, const uint8_t *base, svint32_t offsets)
+ /// LD1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+ ///
+ /// codegenarm64test: same INS_sve_ld1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<uint> GatherVectorByteZeroExtend(Vector<uint> mask, byte* address, Vector<int> indices) => GatherVectorByteZeroExtend(mask, address, indices);
+
+ /// <summary>
+ /// svuint32_t svld1ub_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+ /// LD1B Zresult.S, Pg/Z, [Zbases.S, #0]
+ ///
+ /// codegenarm64test: same INS_sve_ld1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<uint> GatherVectorByteZeroExtend(Vector<uint> mask, Vector<uint> addresses) => GatherVectorByteZeroExtend(mask, addresses);
+
+ /// <summary>
+ /// svuint32_t svld1ub_gather_[u32]offset_u32(svbool_t pg, const uint8_t *base, svuint32_t offsets)
+ /// LD1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+ ///
+ /// codegenarm64test: same INS_sve_ld1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<uint> GatherVectorByteZeroExtend(Vector<uint> mask, byte* address, Vector<uint> indices) => GatherVectorByteZeroExtend(mask, address, indices);
+
+ /// <summary>
+ /// svuint64_t svld1ub_gather_[s64]offset_u64(svbool_t pg, const uint8_t *base, svint64_t offsets)
+ /// LD1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: same INS_sve_ld1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorByteZeroExtend(Vector<ulong> mask, byte* address, Vector<long> indices) => GatherVectorByteZeroExtend(mask, address, indices);
+
+ /// <summary>
+ /// svuint64_t svld1ub_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ /// LD1B Zresult.D, Pg/Z, [Zbases.D, #0]
+ ///
+ /// codegenarm64test: same INS_sve_ld1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorByteZeroExtend(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorByteZeroExtend(mask, addresses);
+
+ /// <summary>
+ /// svuint64_t svld1ub_gather_[u64]offset_u64(svbool_t pg, const uint8_t *base, svuint64_t offsets)
+ /// LD1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: same INS_sve_ld1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorByteZeroExtend(Vector<ulong> mask, byte* address, Vector<ulong> indices) => GatherVectorByteZeroExtend(mask, address, indices);
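The overloads above all lower to LD1B-based gathers: each active lane reads one byte at base plus that lane's offset and zero-extends it to the element width, while inactive lanes are zeroed by the /Z predication. A minimal scalar sketch of that semantics (not part of the generated file; ScalarGatherByteZeroExtend is a hypothetical helper used only for illustration):

static uint[] ScalarGatherByteZeroExtend(bool[] mask, byte[] memory, uint[] offsets)
{
    var result = new uint[offsets.Length];
    for (int i = 0; i < offsets.Length; i++)
    {
        // Inactive lanes are zeroed (/Z predication); active lanes load a single
        // byte from base+offset and zero-extend it to the element width.
        result[i] = mask[i] ? (uint)memory[offsets[i]] : 0u;
    }
    return result;
}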
+
+ /// GatherVectorByteZeroExtendFirstFaulting : Load 8-bit data and zero-extend, first-faulting
+
+ /// <summary>
+ /// svint32_t svldff1ub_gather_[s32]offset_s32(svbool_t pg, const uint8_t *base, svint32_t offsets)
+ /// LDFF1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HW_4A LDFF1B {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_A LDFF1B {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LDFF1B {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V2, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IG_4A_E LDFF1B {<Zt>.B }, <Pg>/Z, [<Xn|SP>{, <Xm>}]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HX_3A_B LDFF1B {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> GatherVectorByteZeroExtendFirstFaulting(Vector<int> mask, byte* address, Vector<int> offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets);
+
+ /// <summary>
+ /// svint32_t svldff1ub_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+ /// LDFF1B Zresult.S, Pg/Z, [Zbases.S, #0]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<int> GatherVectorByteZeroExtendFirstFaulting(Vector<int> mask, Vector<uint> addresses) => GatherVectorByteZeroExtendFirstFaulting(mask, addresses);
+
+ /// <summary>
+ /// svint32_t svldff1ub_gather_[u32]offset_s32(svbool_t pg, const uint8_t *base, svuint32_t offsets)
+ /// LDFF1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<int> GatherVectorByteZeroExtendFirstFaulting(Vector<int> mask, byte* address, Vector<uint> offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets);
+
+ /// <summary>
+ /// svint64_t svldff1ub_gather_[s64]offset_s64(svbool_t pg, const uint8_t *base, svint64_t offsets)
+ /// LDFF1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorByteZeroExtendFirstFaulting(Vector<long> mask, byte* address, Vector<long> offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets);
+
+ /// <summary>
+ /// svint64_t svldff1ub_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ /// LDFF1B Zresult.D, Pg/Z, [Zbases.D, #0]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorByteZeroExtendFirstFaulting(Vector<long> mask, Vector<ulong> addresses) => GatherVectorByteZeroExtendFirstFaulting(mask, addresses);
+
+ /// <summary>
+ /// svint64_t svldff1ub_gather_[u64]offset_s64(svbool_t pg, const uint8_t *base, svuint64_t offsets)
+ /// LDFF1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorByteZeroExtendFirstFaulting(Vector<long> mask, byte* address, Vector<ulong> offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets);
+
+ /// <summary>
+ /// svuint32_t svldff1ub_gather_[s32]offset_u32(svbool_t pg, const uint8_t *base, svint32_t offsets)
+ /// LDFF1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<uint> GatherVectorByteZeroExtendFirstFaulting(Vector<uint> mask, byte* address, Vector<int> offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets);
+
+ /// <summary>
+ /// svuint32_t svldff1ub_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+ /// LDFF1B Zresult.S, Pg/Z, [Zbases.S, #0]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<uint> GatherVectorByteZeroExtendFirstFaulting(Vector<uint> mask, Vector<uint> addresses) => GatherVectorByteZeroExtendFirstFaulting(mask, addresses);
+
+ /// <summary>
+ /// svuint32_t svldff1ub_gather_[u32]offset_u32(svbool_t pg, const uint8_t *base, svuint32_t offsets)
+ /// LDFF1B Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<uint> GatherVectorByteZeroExtendFirstFaulting(Vector<uint> mask, byte* address, Vector<uint> offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets);
+
+ /// <summary>
+ /// svuint64_t svldff1ub_gather_[s64]offset_u64(svbool_t pg, const uint8_t *base, svint64_t offsets)
+ /// LDFF1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorByteZeroExtendFirstFaulting(Vector<ulong> mask, byte* address, Vector<long> offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets);
+
+ /// <summary>
+ /// svuint64_t svldff1ub_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ /// LDFF1B Zresult.D, Pg/Z, [Zbases.D, #0]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorByteZeroExtendFirstFaulting(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorByteZeroExtendFirstFaulting(mask, addresses);
+
+ /// <summary>
+ /// svuint64_t svldff1ub_gather_[u64]offset_u64(svbool_t pg, const uint8_t *base, svuint64_t offsets)
+ /// LDFF1B Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1b cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorByteZeroExtendFirstFaulting(Vector<ulong> mask, byte* address, Vector<ulong> offsets) => GatherVectorByteZeroExtendFirstFaulting(mask, address, offsets);
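First-faulting (LDFF1B) differs from the plain gather only in fault handling: a fault on the first active lane is delivered normally, while a fault on any later lane is suppressed and recorded in the first-fault register (FFR) so the caller can retry from where the load stopped. A rough scalar model of that behaviour (not part of the generated file; tryLoadByte is a hypothetical probe that reports whether a one-byte load at the given offset would fault):

static int[] ScalarGatherFirstFaulting(bool[] mask, Func<long, (bool ok, byte value)> tryLoadByte, long[] offsets, bool[] ffr)
{
    var result = new int[offsets.Length];
    bool firstActive = true;
    for (int i = 0; i < offsets.Length; i++)
    {
        if (!mask[i] || !ffr[i]) continue;          // inactive or already-suppressed lanes stay zero
        var (ok, value) = tryLoadByte(offsets[i]);
        if (!ok)
        {
            if (firstActive) throw new AccessViolationException(); // first active lane faults normally
            for (int j = i; j < ffr.Length; j++) ffr[j] = false;   // later lanes just clear FFR bits
            break;
        }
        result[i] = value;                          // zero-extended 8-bit load
        firstActive = false;
    }
    return result;
}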
+
+ /// GatherVectorFirstFaulting : Unextended load, first-faulting
+
+ /// <summary>
+ /// svint32_t svldff1_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+ /// LDFF1W Zresult.S, Pg/Z, [Zbases.S, #0]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HW_4A LDFF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LDFF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IG_4A_F LDFF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #2}]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LDFF1W {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> GatherVectorFirstFaulting(Vector<int> mask, Vector<uint> addresses) => GatherVectorFirstFaulting(mask, addresses);
+
+ /// <summary>
+ /// svint32_t svldff1_gather_[s32]index[_s32](svbool_t pg, const int32_t *base, svint32_t indices)
+ /// LDFF1W Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #2]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1w cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<int> GatherVectorFirstFaulting(Vector<int> mask, int* address, Vector<int> indices) => GatherVectorFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svint32_t svldff1_gather_[u32]index[_s32](svbool_t pg, const int32_t *base, svuint32_t indices)
+ /// LDFF1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1w cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<int> GatherVectorFirstFaulting(Vector<int> mask, int* address, Vector<uint> indices) => GatherVectorFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svint64_t svldff1_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ /// LDFF1D Zresult.D, Pg/Z, [Zbases.D, #0]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IU_4A LDFF1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_IU_4A_A LDFF1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_IG_4A LDFF1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #3}]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B LDFF1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V5, REG_P6, REG_R7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B_B LDFF1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V2, REG_P6, REG_R5, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IV_3A LDFF1D {<Zt>.D }, <Pg>/Z, [<Zn>.D{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1d, EA_SCALABLE, REG_V7, REG_P3, REG_V1, 248, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorFirstFaulting(Vector<long> mask, Vector<ulong> addresses) => GatherVectorFirstFaulting(mask, addresses);
+
+ /// <summary>
+ /// svint64_t svldff1_gather_[s64]index[_s64](svbool_t pg, const int64_t *base, svint64_t indices)
+ /// LDFF1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1d cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorFirstFaulting(Vector<long> mask, long* address, Vector<long> indices) => GatherVectorFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svint64_t svldff1_gather_[u64]index[_s64](svbool_t pg, const int64_t *base, svuint64_t indices)
+ /// LDFF1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1d cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorFirstFaulting(Vector<long> mask, long* address, Vector<ulong> indices) => GatherVectorFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svuint32_t svldff1_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+ /// LDFF1W Zresult.S, Pg/Z, [Zbases.S, #0]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1w cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<uint> GatherVectorFirstFaulting(Vector<uint> mask, Vector<uint> addresses) => GatherVectorFirstFaulting(mask, addresses);
+
+ /// <summary>
+ /// svuint32_t svldff1_gather_[s32]index[_u32](svbool_t pg, const uint32_t *base, svint32_t indices)
+ /// LDFF1W Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #2]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1w cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<uint> GatherVectorFirstFaulting(Vector<uint> mask, uint* address, Vector<int> indices) => GatherVectorFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svuint32_t svldff1_gather_[u32]index[_u32](svbool_t pg, const uint32_t *base, svuint32_t indices)
+ /// LDFF1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1w cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<uint> GatherVectorFirstFaulting(Vector<uint> mask, uint* address, Vector<uint> indices) => GatherVectorFirstFaulting(mask, address, indices);
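Note the addressing difference from the byte gathers earlier in this file: LDFF1W and LDFF1D treat the per-lane values as element indices scaled by the element size (the LSL #2 / LSL #3 in the templates above), not as raw byte offsets. A one-lane sketch of that address computation (illustrative helper, not part of the generated file):

static unsafe uint LoadOneWordLane(uint* address, long index)
{
    // LDFF1W-style addressing: base + index * sizeof(element), i.e. 'LSL #2'.
    return *(uint*)((byte*)address + index * sizeof(uint));
}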
+
+ /// <summary>
+ /// svuint64_t svldff1_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ /// LDFF1D Zresult.D, Pg/Z, [Zbases.D, #0]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1d cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorFirstFaulting(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorFirstFaulting(mask, addresses);
+
+ /// <summary>
+ /// svuint64_t svldff1_gather_[s64]index[_u64](svbool_t pg, const uint64_t *base, svint64_t indices)
+ /// LDFF1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1d cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorFirstFaulting(Vector<ulong> mask, ulong* address, Vector<long> indices) => GatherVectorFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svuint64_t svldff1_gather_[u64]index[_u64](svbool_t pg, const uint64_t *base, svuint64_t indices)
+ /// LDFF1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1d cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorFirstFaulting(Vector<ulong> mask, ulong* address, Vector<ulong> indices) => GatherVectorFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svfloat32_t svldff1_gather_[s32]index[_f32](svbool_t pg, const float32_t *base, svint32_t indices)
+ /// LDFF1W Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #2]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1w cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<float> GatherVectorFirstFaulting(Vector<float> mask, float* address, Vector<int> indices) => GatherVectorFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svfloat32_t svldff1_gather[_u32base]_f32(svbool_t pg, svuint32_t bases)
+ /// LDFF1W Zresult.S, Pg/Z, [Zbases.S, #0]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1w cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<float> GatherVectorFirstFaulting(Vector<float> mask, Vector<uint> addresses) => GatherVectorFirstFaulting(mask, addresses);
+
+ /// <summary>
+ /// svfloat32_t svldff1_gather_[u32]index[_f32](svbool_t pg, const float32_t *base, svuint32_t indices)
+ /// LDFF1W Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #2]
+ ///
+ /// codegenarm64test: same INS_sve_ldff1w cases as listed for the overload above
+ /// </summary>
+ public static unsafe Vector<float> GatherVectorFirstFaulting(Vector<float> mask, float* address, Vector<uint> indices) => GatherVectorFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svfloat64_t svldff1_gather_[s64]index[_f64](svbool_t pg, const float64_t *base, svint64_t indices)
+ /// LDFF1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IU_4A LDFF1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_IU_4A_A LDFF1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_IG_4A LDFF1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #3}]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B LDFF1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V5, REG_P6, REG_R7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B_B LDFF1D {<Zt>.D
}, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V2, REG_P6, REG_R5, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1d, EA_SCALABLE, REG_V7, REG_P3, REG_V1, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, double* address, Vector indices) => GatherVectorFirstFaulting(mask, address, indices); + + /// + /// svfloat64_t svldff1_gather[_u64base]_f64(svbool_t pg, svuint64_t bases) + /// LDFF1D Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1D {.D }, /Z, [{, , LSL #3}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V5, REG_P6, REG_R7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V2, REG_P6, REG_R5, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1d, EA_SCALABLE, REG_V7, REG_P3, REG_V1, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses) => GatherVectorFirstFaulting(mask, addresses); + + /// + /// svfloat64_t svldff1_gather_[u64]index[_f64](svbool_t pg, const float64_t *base, svuint64_t indices) + /// LDFF1D Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1D {.D }, /Z, [{, , LSL #3}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V5, REG_P6, REG_R7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V2, REG_P6, REG_R5, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1D {.D }, /Z, [.D{, #}] + /// 
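+
+        // Editor's note: a minimal usage sketch for the GatherVectorFirstFaulting
+        // overloads in this group; it is illustrative, not generated output. The
+        // method name and parameters are hypothetical, and it assumes the
+        // CreateTrueMaskDouble helper declared elsewhere in this API.
+        private static unsafe Vector<double> GatherDoublesSketch(double* table, Vector<long> indices)
+        {
+            if (!Sve.IsSupported) return default;
+            // All-true governing predicate; LDFF1D stops loading at the first
+            // element whose address would fault instead of raising the fault.
+            Vector<double> mask = Sve.CreateTrueMaskDouble();
+            Vector<double> gathered = Sve.GatherVectorFirstFaulting(mask, table, indices);
+            // Lanes at and beyond the first faulting element are not valid; a caller
+            // would consult the first-fault register (FFR) before consuming them.
+            return gathered;
+        }
+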
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ldff1d, EA_SCALABLE, REG_V7, REG_P3, REG_V1, 248, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<double> GatherVectorFirstFaulting(Vector<double> mask, double* address, Vector<ulong> indices) => GatherVectorFirstFaulting(mask, address, indices);
+
+
+        /// GatherVectorInt16SignExtend : Load 16-bit data and sign-extend
+
+        /// <summary>
+        /// svint32_t svld1sh_gather_[s32]index_s32(svbool_t pg, const int16_t *base, svint32_t indices)
+        ///   LD1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IJ_3A_F  LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_HW_4A  LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_HW_4A_A  LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_HW_4A_B  LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_HW_4A_C  LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_HW_4B  LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_HW_4B_D  LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_IK_4A_G  LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_HX_3A_E  LD1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorInt16SignExtend(Vector<int> mask, short* address, Vector<int> indices) => GatherVectorInt16SignExtend(mask, address, indices);
+
+        /// <summary>
+        /// svint32_t svld1sh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+        ///   LD1SH Zresult.S, Pg/Z, [Zbases.S, #0]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IJ_3A_F  LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S);
+        ///
theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SH {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1SH {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1SH {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1SH {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SH {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1SH {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_G LD1SH {.S }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1SH {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses) => GatherVectorInt16SignExtend(mask, addresses); + + /// + /// svint32_t svld1sh_gather_[u32]index_s32(svbool_t pg, const int16_t *base, svuint32_t indices) + /// LD1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_F LD1SH {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SH {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1SH {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, 
REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1SH {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1SH {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SH {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1SH {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_G LD1SH {.S }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1SH {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtend(mask, address, indices); + + /// + /// svint64_t svld1sh_gather_[s64]index_s64(svbool_t pg, const int16_t *base, svint64_t indices) + /// LD1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_F LD1SH {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SH {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1SH {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1SH {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1SH {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SH {.D }, /Z, [, .D, LSL #1] + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1SH {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_G LD1SH {.S }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1SH {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtend(mask, address, indices); + + /// + /// svint64_t svld1sh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// LD1SH Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_F LD1SH {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SH {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1SH {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1SH {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1SH {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SH {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1SH {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_G LD1SH {.S }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1SH {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, 
REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt16SignExtend(Vector<long> mask, Vector<ulong> addresses) => GatherVectorInt16SignExtend(mask, addresses);
+
+        /// <summary>
+        /// svint64_t svld1sh_gather_[u64]index_s64(svbool_t pg, const int16_t *base, svuint64_t indices)
+        ///   LD1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IJ_3A_F  LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_HW_4A  LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_HW_4A_A  LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_HW_4A_B  LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_HW_4A_C  LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_HW_4B  LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_HW_4B_D  LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_IK_4A_G  LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_HX_3A_E  LD1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt16SignExtend(Vector<long> mask, short* address, Vector<ulong> indices) => GatherVectorInt16SignExtend(mask, address, indices);
+
+        /// <summary>
+        /// svuint32_t svld1sh_gather_[s32]index_u32(svbool_t pg, const int16_t *base, svint32_t indices)
+        ///   LD1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IJ_3A_F  LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S);
+        ///
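+
+        // Editor's note: an illustrative sketch (not generated output) of the
+        // widening gather above. LD1SH reads a 16-bit element per active lane and
+        // sign-extends it into a 32-bit lane. The method name is hypothetical; the
+        // CreateTrueMaskInt32 helper is assumed from elsewhere in this API.
+        private static unsafe Vector<int> GatherInt16SignExtendSketch(short* table, Vector<int> indices)
+        {
+            if (!Sve.IsSupported) return default;
+            // Governing predicate with all lanes active.
+            Vector<int> mask = Sve.CreateTrueMaskInt32();
+            // Each active lane loads table[indices[i]] (the #1 / times-two scaling in
+            // the LD1SH encodings above) and sign-extends the short to an int.
+            return Sve.GatherVectorInt16SignExtend(mask, table, indices);
+        }
+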
theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SH {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1SH {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1SH {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1SH {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SH {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1SH {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_G LD1SH {.S }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1SH {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtend(mask, address, indices); + + /// + /// svuint32_t svld1sh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// LD1SH Zresult.S, Pg/Z, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_F LD1SH {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SH {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1SH {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, 
INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1SH {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1SH {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SH {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1SH {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_G LD1SH {.S }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1SH {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses) => GatherVectorInt16SignExtend(mask, addresses); + + /// + /// svuint32_t svld1sh_gather_[u32]index_u32(svbool_t pg, const int16_t *base, svuint32_t indices) + /// LD1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_F LD1SH {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SH {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1SH {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1SH {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1SH {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SH {.D }, /Z, [, .D, LSL #1] + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1SH {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_G LD1SH {.S }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1SH {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtend(mask, address, indices); + + /// + /// svuint64_t svld1sh_gather_[s64]index_u64(svbool_t pg, const int16_t *base, svint64_t indices) + /// LD1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_F LD1SH {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SH {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1SH {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1SH {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1SH {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SH {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1SH {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_G LD1SH {.S }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1SH {.S }, /Z, [.S{, #}] + /// 
theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtend(mask, address, indices); + + /// + /// svuint64_t svld1sh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// LD1SH Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_F LD1SH {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SH {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1SH {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1SH {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1SH {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SH {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1SH {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_G LD1SH {.S }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1SH {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses) => GatherVectorInt16SignExtend(mask, addresses); + + /// + /// svuint64_t svld1sh_gather_[u64]index_u64(svbool_t pg, const int16_t *base, svuint64_t indices) + /// LD1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_F LD1SH {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S); + /// 
theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_HW_4A  LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_HW_4A_A  LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_HW_4A_B  LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_HW_4A_C  LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_HW_4B  LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_HW_4B_D  LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_IK_4A_G  LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_HX_3A_E  LD1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt16SignExtend(Vector<ulong> mask, short* address, Vector<ulong> indices) => GatherVectorInt16SignExtend(mask, address, indices);
+
+
+        /// GatherVectorInt16SignExtendFirstFaulting : Load 16-bit data and sign-extend, first-faulting
+
+        /// <summary>
+        /// svint32_t svldff1sh_gather_[s32]index_s32(svbool_t pg, const int16_t *base, svint32_t indices)
+        ///   LDFF1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_HW_4A  LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_HW_4A_A  LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_HW_4A_B  LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+        ///
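+
+        // Editor's note: an illustrative sketch (not generated output) pairing the
+        // first-faulting widening gather with the first-fault register (FFR). The
+        // method name is hypothetical, and SetFfr, GetFfrInt32, CreateTrueMaskInt32,
+        // and ConditionalSelect are assumed from elsewhere in this API.
+        private static unsafe Vector<int> GatherInt16FirstFaultingSketch(short* table, Vector<int> indices)
+        {
+            if (!Sve.IsSupported) return default;
+            Vector<int> mask = Sve.CreateTrueMaskInt32();
+            Sve.SetFfr(Sve.CreateTrueMaskInt32());   // reset the FFR to all-true
+            Vector<int> value = Sve.GatherVectorInt16SignExtendFirstFaulting(mask, table, indices);
+            // FFR lanes remain set only up to (not including) the first faulting
+            // element, so it tells the caller which gathered lanes are valid.
+            Vector<int> loaded = Sve.GetFfrInt32();
+            return Sve.ConditionalSelect(loaded, value, Vector<int>.Zero);
+        }
+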
theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1SH {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SH {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1SH {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1SH {.S }, /Z, [{, , LSL #1}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1SH {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices); + + /// + /// svint32_t svldff1sh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// LDFF1SH Zresult.S, Pg/Z, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SH {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1SH {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1SH {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1SH {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SH {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1SH {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1SH {.S }, /Z, [{, 
, LSL #1}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1SH {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorInt16SignExtendFirstFaulting(mask, addresses); + + /// + /// svint32_t svldff1sh_gather_[u32]index_s32(svbool_t pg, const int16_t *base, svuint32_t indices) + /// LDFF1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SH {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1SH {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1SH {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1SH {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SH {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1SH {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1SH {.S }, /Z, [{, , LSL #1}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1SH {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1sh_gather_[s64]index_s64(svbool_t pg, const int16_t *base, svint64_t indices) + /// LDFF1SH Zresult.D, Pg/Z, [Xbase, 
Zindices.D, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SH {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1SH {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1SH {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1SH {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SH {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1SH {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1SH {.S }, /Z, [{, , LSL #1}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1SH {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1sh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// LDFF1SH Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SH {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1SH {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1SH {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_HW_4A_C   LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_HW_4B   LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_HW_4B_D   LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_IG_4A_F   LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #1}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_HX_3A_E   LDFF1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt16SignExtendFirstFaulting(Vector<long> mask, Vector<ulong> addresses) => GatherVectorInt16SignExtendFirstFaulting(mask, addresses);
+
+        /// <summary>
+        /// svint64_t svldff1sh_gather_[u64]index_s64(svbool_t pg, const int16_t *base, svuint64_t indices)
+        ///   LDFF1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt16SignExtendFirstFaulting(Vector<long> mask, short* address, Vector<ulong> indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices);
+
+        /// <summary>
+        /// svuint32_t svldff1sh_gather_[s32]index_u32(svbool_t pg, const int16_t *base, svint32_t indices)
+        ///   LDFF1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorInt16SignExtendFirstFaulting(Vector<uint> mask, short* address, Vector<int> indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices);
+
+        /// <summary>
+        /// svuint32_t svldff1sh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+        ///   LDFF1SH Zresult.S, Pg/Z, [Zbases.S, #0]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorInt16SignExtendFirstFaulting(Vector<uint> mask, Vector<uint> addresses) => GatherVectorInt16SignExtendFirstFaulting(mask, addresses);
+
+        /// <summary>
+        /// svuint32_t svldff1sh_gather_[u32]index_u32(svbool_t pg, const int16_t *base, svuint32_t indices)
+        ///   LDFF1SH Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorInt16SignExtendFirstFaulting(Vector<uint> mask, short* address, Vector<uint> indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices);
+
+        /// <summary>
+        /// svuint64_t svldff1sh_gather_[s64]index_u64(svbool_t pg, const int16_t *base, svint64_t indices)
+        ///   LDFF1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt16SignExtendFirstFaulting(Vector<ulong> mask, short* address, Vector<long> indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices);
+
+        /// <summary>
+        /// svuint64_t svldff1sh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+        ///   LDFF1SH Zresult.D, Pg/Z, [Zbases.D, #0]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt16SignExtendFirstFaulting(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorInt16SignExtendFirstFaulting(mask, addresses);
+
+        /// <summary>
+        /// svuint64_t svldff1sh_gather_[u64]index_u64(svbool_t pg, const int16_t *base, svuint64_t indices)
+        ///   LDFF1SH Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt16SignExtendFirstFaulting(Vector<ulong> mask, short* address, Vector<ulong> indices) => GatherVectorInt16SignExtendFirstFaulting(mask, address, indices);
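+
+        // Editorial usage sketch (not generator output): a first-faulting gather
+        // loads 16-bit elements through per-lane indices, sign-extends them to the
+        // result element size, and suppresses faults after the first active lane
+        // (recording them in the FFR instead). The Sve class shape and the
+        // CreateTrueMaskInt64 helper are assumed from the wider API proposal.
+        //
+        //     static unsafe Vector<long> GatherWidened(short* table, Vector<ulong> indices)
+        //     {
+        //         Vector<long> mask = Sve.CreateTrueMaskInt64();
+        //         // Lane i reads table[indices[i]] (index scaled by sizeof(short))
+        //         // and sign-extends the halfword to 64 bits.
+        //         return Sve.GatherVectorInt16SignExtendFirstFaulting(mask, table, indices);
+        //     }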
+
+        /// GatherVectorInt16WithByteOffsetsSignExtend : Load 16-bit data and sign-extend
+
+        /// <summary>
+        /// svint32_t svld1sh_gather_[s32]offset_s32(svbool_t pg, const int16_t *base, svint32_t offsets)
+        ///   LD1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IJ_3A_F   LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_HW_4A   LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_HW_4A_A   LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_HW_4A_B   LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_HW_4A_C   LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_HW_4B   LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_HW_4B_D   LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_IK_4A_G   LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_HX_3A_E   LD1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorInt16WithByteOffsetsSignExtend(Vector<int> mask, short* address, Vector<int> offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets);
+
+        /// <summary>
+        /// svint32_t svld1sh_gather_[u32]offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+        ///   LD1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+        ///
+        /// codegenarm64test: same ld1sh listing as above
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorInt16WithByteOffsetsSignExtend(Vector<int> mask, short* address, Vector<uint> offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets);
+
+        /// <summary>
+        /// svint64_t svld1sh_gather_[s64]offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets)
+        ///   LD1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+        ///
+        /// codegenarm64test: same ld1sh listing as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt16WithByteOffsetsSignExtend(Vector<long> mask, short* address, Vector<long> offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets);
+
+        /// <summary>
+        /// svint64_t svld1sh_gather_[u64]offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+        ///   LD1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+        ///
+        /// codegenarm64test: same ld1sh listing as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt16WithByteOffsetsSignExtend(Vector<long> mask, short* address, Vector<ulong> offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets);
+
+        /// <summary>
+        /// svuint32_t svld1sh_gather_[s32]offset_u32(svbool_t pg, const int16_t *base, svint32_t offsets)
+        ///   LD1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+        ///
+        /// codegenarm64test: same ld1sh listing as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorInt16WithByteOffsetsSignExtend(Vector<uint> mask, short* address, Vector<int> offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets);
+
+        /// <summary>
+        /// svuint32_t svld1sh_gather_[u32]offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+        ///   LD1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+        ///
+        /// codegenarm64test: same ld1sh listing as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorInt16WithByteOffsetsSignExtend(Vector<uint> mask, short* address, Vector<uint> offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svld1sh_gather_[s64]offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets)
+        ///   LD1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+        ///
+        /// codegenarm64test: same ld1sh listing as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtend(Vector<ulong> mask, short* address, Vector<long> offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svld1sh_gather_[u64]offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+        ///   LD1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+        ///
+        /// codegenarm64test: same ld1sh listing as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtend(Vector<ulong> mask, short* address, Vector<ulong> offsets) => GatherVectorInt16WithByteOffsetsSignExtend(mask, address, offsets);
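+
+        // Editorial usage sketch (not generator output): the WithByteOffsets
+        // overloads take unscaled byte offsets, while the index overloads scale by
+        // the element size, so the two calls below read the same halfwords.
+        // CreateTrueMaskInt32 and the non-faulting GatherVectorInt16SignExtend
+        // overload are assumed from elsewhere in this API surface.
+        //
+        //     static unsafe Vector<int> GatherBothWays(short* address, Vector<int> indices)
+        //     {
+        //         Vector<int> mask = Sve.CreateTrueMaskInt32();
+        //         Vector<int> byteOffsets = indices * 2; // sizeof(short) == 2
+        //         Vector<int> viaIndex  = Sve.GatherVectorInt16SignExtend(mask, address, indices);
+        //         Vector<int> viaOffset = Sve.GatherVectorInt16WithByteOffsetsSignExtend(mask, address, byteOffsets);
+        //         return viaOffset; // element-wise equal to viaIndex
+        //     }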
+
+        /// GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting : Load 16-bit data and sign-extend, first-faulting
+
+        /// <summary>
+        /// svint32_t svldff1sh_gather_[s32]offset_s32(svbool_t pg, const int16_t *base, svint32_t offsets)
+        ///   LDFF1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<int> mask, short* address, Vector<int> offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets);
+
+        /// <summary>
+        /// svint32_t svldff1sh_gather_[u32]offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+        ///   LDFF1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<int> mask, short* address, Vector<uint> offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets);
+
+        /// <summary>
+        /// svint64_t svldff1sh_gather_[s64]offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets)
+        ///   LDFF1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<long> mask, short* address, Vector<long> offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets);
+
+        /// <summary>
+        /// svint64_t svldff1sh_gather_[u64]offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+        ///   LDFF1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<long> mask, short* address, Vector<ulong> offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets);
+
+        /// <summary>
+        /// svuint32_t svldff1sh_gather_[s32]offset_u32(svbool_t pg, const int16_t *base, svint32_t offsets)
+        ///   LDFF1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<uint> mask, short* address, Vector<int> offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets);
+
+        /// <summary>
+        /// svuint32_t svldff1sh_gather_[u32]offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+        ///   LDFF1SH Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<uint> mask, short* address, Vector<uint> offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svldff1sh_gather_[s64]offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets)
+        ///   LDFF1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<ulong> mask, short* address, Vector<long> offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svldff1sh_gather_[u64]offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+        ///   LDFF1SH Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+        ///
+        /// codegenarm64test: same ldff1sh listing as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<ulong> mask, short* address, Vector<ulong> offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets);
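+
+        // Editorial usage sketch (not generator output): a common first-faulting
+        // pattern resets the FFR, gathers speculatively, then reads the FFR back to
+        // see which lanes actually loaded. SetFfr, CreateTrueMaskByte,
+        // CreateTrueMaskInt32, GetFfrInt32 and ConditionalSelect are assumed names
+        // from the wider proposal and may differ.
+        //
+        //     static unsafe Vector<int> GatherAvailable(short* address, Vector<int> byteOffsets)
+        //     {
+        //         Sve.SetFfr(Sve.CreateTrueMaskByte());   // reset the first-fault register
+        //         Vector<int> mask = Sve.CreateTrueMaskInt32();
+        //         Vector<int> data = Sve.GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, byteOffsets);
+        //         Vector<int> loadedLanes = Sve.GetFfrInt32();  // lanes that completed without faulting
+        //         return Sve.ConditionalSelect(loadedLanes, data, Vector<int>.Zero);
+        //     }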
LSL #1}]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LDFF1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(Vector<ulong> mask, short* address, Vector<ulong> offsets) => GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets);
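+
+ // Illustrative usage sketch, not part of the generated surface: assuming the
+ // Vector<T> parameters implied by the C prototypes above and the containing
+ // Sve class, a caller widening predicated 16-bit loads through unscaled
+ // 64-bit byte offsets might wrap the first-faulting gather like this
+ // (hypothetical helper name):
+ //
+ //     static unsafe Vector<ulong> WidenInt16AtByteOffsets(Vector<ulong> mask, short* addr, Vector<ulong> byteOffsets)
+ //     {
+ //         // A fault on any element after the first active one is suppressed
+ //         // and recorded in the FFR instead of throwing.
+ //         return Sve.GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting(mask, addr, byteOffsets);
+ //     }
+
+
+ /// GatherVectorInt32SignExtend : Load 32-bit data and sign-extend
+
+ /// <summary>
+ /// svint64_t svld1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices)
+ /// LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IU_4A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_IU_4A_A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_IK_4A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B_B LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IV_3A LD1SW {<Zt>.D }, <Pg>/Z, [<Zn>.D{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorInt32SignExtend(Vector<long> mask, int* address, Vector<long> indices) => GatherVectorInt32SignExtend(mask, address, indices);
+
+ /// <summary>
+ /// svint64_t svld1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ /// LD1SW Zresult.D, Pg/Z, [Zbases.D, #0]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IU_4A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_IU_4A_A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ ///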
theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses) => GatherVectorInt32SignExtend(mask, addresses); + + /// + /// svint64_t svld1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtend(mask, address, indices); + + /// + /// svint64_t svld1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) + /// LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + 
/// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtend(mask, address, indices); + + /// + /// svint64_t svld1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// LD1SW Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses) => GatherVectorInt32SignExtend(mask, addresses); + + /// + /// svint64_t svld1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, 
INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtend(mask, address, indices); + + /// + /// svuint64_t svld1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) + /// LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtend(mask, address, indices); + + /// + /// svuint64_t svld1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// LD1SW Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, 
INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses) => GatherVectorInt32SignExtend(mask, addresses); + + /// + /// svuint64_t svld1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtend(mask, address, indices); + + /// + /// svuint64_t svld1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) + /// LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, 
.D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtend(mask, address, indices); + + /// + /// svuint64_t svld1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// LD1SW Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses) => GatherVectorInt32SignExtend(mask, addresses); + + /// + /// svuint64_t svld1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// LD1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, 
EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IU_4A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_IU_4A_A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_IK_4A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B_B LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IV_3A LD1SW {<Zt>.D }, <Pg>/Z, [<Zn>.D{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorInt32SignExtend(Vector<ulong> mask, int* address, Vector<ulong> indices) => GatherVectorInt32SignExtend(mask, address, indices);
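+
+ // Illustrative sketch (hypothetical caller, assumed Vector<T> parameters): the
+ // index-based overloads scale each 64-bit index by sizeof(int), mapping to
+ // LD1SW [Xbase, Zindices.D, LSL #2], so active element i loads the int at
+ // address + 4 * indices[i] and sign-extends it to 64 bits:
+ //
+ //     static unsafe Vector<long> WidenInt32(Vector<long> mask, int* address, Vector<long> indices)
+ //         => Sve.GatherVectorInt32SignExtend(mask, address, indices);
+
+
+ /// GatherVectorInt32SignExtendFirstFaulting : Load 32-bit data and sign-extend, first-faulting
+
+ /// <summary>
+ /// svint64_t svldff1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices)
+ /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IU_4A LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_IU_4A_A LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_IG_4A LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #2}]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B_B LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IV_3A LDFF1SW {<Zt>.D }, <Pg>/Z, [<Zn>.D{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorInt32SignExtendFirstFaulting(Vector<long> mask, int* address, Vector<long> indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices);
+
+ /// <summary>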
+ /// svint64_t svldff1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// LDFF1SW Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorInt32SignExtendFirstFaulting(mask, addresses); + + /// + /// svint64_t svldff1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static 
unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices) + /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices); + + /// + /// svint64_t svldff1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// LDFF1SW Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, 
INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorInt32SignExtendFirstFaulting(mask, addresses); + + /// + /// svint64_t svldff1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) + /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, 
REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// LDFF1SW Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorInt32SignExtendFirstFaulting(mask, addresses); + + /// + /// svuint64_t svldff1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) + /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, 
REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices) + /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices); + + /// + /// svuint64_t svldff1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// LDFF1SW Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B_B LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IV_3A LDFF1SW {<Zt>.D }, <Pg>/Z, [<Zn>.D{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorInt32SignExtendFirstFaulting(Vector<ulong> mask, int* address, Vector<ulong> indices) => GatherVectorInt32SignExtendFirstFaulting(mask, address, indices);
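+
+ // Illustrative sketch (assumed types, hypothetical helper): the first-faulting
+ // variant has the same addressing behaviour as GatherVectorInt32SignExtend, but
+ // a fault past the first active element is suppressed and recorded in the FFR,
+ // which allows speculative gathers across possibly-unmapped pages:
+ //
+ //     static unsafe Vector<long> SpeculativeWidenInt32(Vector<long> mask, int* address, Vector<long> indices)
+ //         => Sve.GatherVectorInt32SignExtendFirstFaulting(mask, address, indices);
+
+
+ /// GatherVectorInt32WithByteOffsetsSignExtend : Load 32-bit data and sign-extend
+
+ /// <summary>
+ /// svint64_t svld1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets)
+ /// LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IU_4A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw,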
EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svint64_t svld1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svint64_t svld1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svint64_t svld1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svuint64_t svld1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW 
{.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svuint64_t svld1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// 
svuint64_t svld1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + /// + /// svuint64_t svld1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// LD1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A LD1SW {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IU_4A LD1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LD1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IK_4A LD1SW {.D }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LD1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// + public 
static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtend(mask, address, offsets); + + + /// GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting : Load 32-bit data and sign-extend, first-faulting + + /// + /// svint64_t svldff1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, 
INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// 
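(Example, hypothetical: the first-faulting form gathers like the non-faulting overloads, but a fault on any element after the first active one suppresses the exception and instead clears that lane in the FFR, which Sve.GetFfrInt64() — an assumed helper — reads back. Reusing mask, src and offsets from the earlier sketch:) + /// Vector<long> r = Sve.GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, src, offsets); + /// Vector<long> ffr = Sve.GetFfrInt64(); // nonzero lanes completed without faulting + /// 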
theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) + /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) + /// LDFF1SW Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1SW {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1SW {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1SW {.D }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1SW {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1SW {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1SW {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting(mask, address, offsets); + + + /// GatherVectorSByteSignExtend : Load 8-bit data and sign-extend + + /// + /// svint32_t svld1sb_gather_[s32]offset_s32(svbool_t pg, const int8_t *base, svint32_t offsets) + /// LD1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_D LD1SB {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_F LD1SB {.H }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, 
INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices) => GatherVectorSByteSignExtend(mask, address, indices); + + /// + /// svint32_t svld1sb_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// LD1SB Zresult.S, Pg/Z, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_D LD1SB {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_F LD1SB {.H }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector addresses) => GatherVectorSByteSignExtend(mask, addresses); + + /// + /// svint32_t svld1sb_gather_[u32]offset_s32(svbool_t pg, const int8_t *base, svuint32_t offsets) + /// LD1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_D LD1SB {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, 
REG_R1, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_F LD1SB {.H }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices) => GatherVectorSByteSignExtend(mask, address, indices); + + /// + /// svint64_t svld1sb_gather_[s64]offset_s64(svbool_t pg, const int8_t *base, svint64_t offsets) + /// LD1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_D LD1SB {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_F LD1SB {.H }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices) => GatherVectorSByteSignExtend(mask, address, indices); + + /// + /// svint64_t svld1sb_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// LD1SB Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_D LD1SB {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D); + /// 
IF_SVE_HW_4A LD1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_F LD1SB {.H }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector addresses) => GatherVectorSByteSignExtend(mask, addresses); + + /// + /// svint64_t svld1sb_gather_[u64]offset_s64(svbool_t pg, const int8_t *base, svuint64_t offsets) + /// LD1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_D LD1SB {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_F LD1SB {.H }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector 
GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices) => GatherVectorSByteSignExtend(mask, address, indices); + + /// + /// svuint32_t svld1sb_gather_[s32]offset_u32(svbool_t pg, const int8_t *base, svint32_t offsets) + /// LD1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_D LD1SB {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_F LD1SB {.H }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices) => GatherVectorSByteSignExtend(mask, address, indices); + + /// + /// svuint32_t svld1sb_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// LD1SB Zresult.S, Pg/Z, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_D LD1SB {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D); + 
/// IF_SVE_IK_4A_F LD1SB {.H }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector addresses) => GatherVectorSByteSignExtend(mask, addresses); + + /// + /// svuint32_t svld1sb_gather_[u32]offset_u32(svbool_t pg, const int8_t *base, svuint32_t offsets) + /// LD1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_D LD1SB {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_F LD1SB {.H }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices) => GatherVectorSByteSignExtend(mask, address, indices); + + /// + /// svuint64_t svld1sb_gather_[s64]offset_u64(svbool_t pg, const int8_t *base, svint64_t offsets) + /// LD1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_D LD1SB {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SB {.D }, /Z, 
[, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_F LD1SB {.H }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices) => GatherVectorSByteSignExtend(mask, address, indices); + + /// + /// svuint64_t svld1sb_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// LD1SB Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_D LD1SB {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_F LD1SB {.H }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector 
addresses) => GatherVectorSByteSignExtend(mask, addresses); + + /// + /// svuint64_t svld1sb_gather_[u64]offset_u64(svbool_t pg, const int8_t *base, svuint64_t offsets) + /// LD1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_D LD1SB {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_F LD1SB {.H }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices) => GatherVectorSByteSignExtend(mask, address, indices); + + + /// GatherVectorSByteSignExtendFirstFaulting : Load 8-bit data and sign-extend, first-faulting + + /// + /// svint32_t svldff1sb_gather_[s32]offset_s32(svbool_t pg, const int8_t *base, svint32_t offsets) + /// LDFF1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_D LDFF1SB {.H }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S); + /// 
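(Example, a hypothetical caller of the non-faulting GatherVectorSByteSignExtend above; the elements are single bytes, so the per-lane values act as byte offsets and element indices interchangeably, which is why no LSL-scaled addressing form is listed:) + /// sbyte* data = stackalloc sbyte[8] { -8, -7, -6, -5, -4, -3, -2, -1 }; + /// Vector<int> indices = new Vector<int>(3); + /// Vector<int> r = Sve.GatherVectorSByteSignExtend(Sve.CreateTrueMaskInt32(), data, indices); // each active lane reads data[3] == -5 + /// 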
theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svint32_t svldff1sb_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// LDFF1SB Zresult.S, Pg/Z, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_D LDFF1SB {.H }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorSByteSignExtendFirstFaulting(mask, addresses); + + /// + /// svint32_t svldff1sb_gather_[u32]offset_s32(svbool_t pg, const int8_t *base, svuint32_t offsets) + /// LDFF1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_D LDFF1SB {.H }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, 
INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1sb_gather_[s64]offset_s64(svbool_t pg, const int8_t *base, svint64_t offsets) + /// LDFF1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_D LDFF1SB {.H }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1sb_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// LDFF1SB Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_D LDFF1SB {.H }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, 
EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorSByteSignExtendFirstFaulting(mask, addresses); + + /// + /// svint64_t svldff1sb_gather_[u64]offset_s64(svbool_t pg, const int8_t *base, svuint64_t offsets) + /// LDFF1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_D LDFF1SB {.H }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint32_t svldff1sb_gather_[s32]offset_u32(svbool_t pg, const int8_t *base, svint32_t offsets) + /// LDFF1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_D LDFF1SB {.H }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, 
INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint32_t svldff1sb_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// LDFF1SB Zresult.S, Pg/Z, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_D LDFF1SB {.H }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorSByteSignExtendFirstFaulting(mask, addresses); + + /// + /// svuint32_t svldff1sb_gather_[u32]offset_u32(svbool_t pg, const int8_t *base, svuint32_t offsets) + /// LDFF1SB Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_D LDFF1SB {.H }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, 
REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1sb_gather_[s64]offset_u64(svbool_t pg, const int8_t *base, svint64_t offsets) + /// LDFF1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_D LDFF1SB {.H }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1sb_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// LDFF1SB Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_D LDFF1SB {.H }, /Z, [{, }] + /// 
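(Example, hypothetical: the Vector<ulong> addresses overloads take absolute per-lane addresses, matching the [Zbases.D, #0] form, rather than a base pointer plus offsets:) + /// sbyte* p = stackalloc sbyte[1] { -42 }; + /// Vector<ulong> addresses = new Vector<ulong>((ulong)p); // every lane points at p[0] + /// Vector<long> r = Sve.GatherVectorSByteSignExtendFirstFaulting(Sve.CreateTrueMaskInt64(), addresses); // all active lanes hold -42L + /// 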
theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses) => GatherVectorSByteSignExtendFirstFaulting(mask, addresses); + + /// + /// svuint64_t svldff1sb_gather_[u64]offset_u64(svbool_t pg, const int8_t *base, svuint64_t offsets) + /// LDFF1SB Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_D LDFF1SB {.H }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets) => GatherVectorSByteSignExtendFirstFaulting(mask, address, offsets); + + + /// GatherVectorUInt16WithByteOffsetsZeroExtend : Load 16-bit data and zero-extend + + /// + /// svint32_t svld1uh_gather_[s32]offset_s32(svbool_t pg, const uint16_t *base, svint32_t offsets) + /// LD1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_G LD1H {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1H {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, 
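+    // Illustrative usage sketch (not part of the generated surface): a minimal caller
+    // for the first-faulting sign-extending gather above. First-faulting means only the
+    // first active lane may fault; later lanes that would fault are instead deactivated
+    // in the FFR. The vector-base overloads (Vector<ulong> addresses) gather from
+    // absolute addresses rather than base+offsets. CreateTrueMaskUInt32 is assumed to
+    // come from this same API surface; this sketch only runs on SVE-capable hardware.
+    private static unsafe Vector<uint> ExampleGatherSByteFirstFaulting(sbyte* data, Vector<uint> byteOffsets)
+    {
+        Vector<uint> mask = CreateTrueMaskUInt32();   // all lanes active
+        return GatherVectorSByteSignExtendFirstFaulting(mask, data, byteOffsets);
+    }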
+
+    /// GatherVectorUInt16WithByteOffsetsZeroExtend : Load 16-bit data and zero-extend
+
+    /// <summary>
+    /// svint32_t svld1uh_gather_[s32]offset_s32(svbool_t pg, const uint16_t *base, svint32_t offsets)
+    ///   LD1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_IJ_3A_G LD1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_H);
+    ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_S);
+    ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_HW_4A   LD1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+    ///    IF_SVE_HW_4A_A LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+    ///    IF_SVE_HW_4A_B LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_UXTW);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_SXTW);
+    ///    IF_SVE_HW_4A_C LD1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+    ///    IF_SVE_HW_4B   LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    ///    IF_SVE_HW_4B_D LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_IK_4A_I LD1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    ///    IF_SVE_HX_3A_E LD1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+    ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_S);
+    ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_D);
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<int> mask, ushort* address, Vector<int> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets);
+
+    /// <summary>
+    /// svint32_t svld1uh_gather_[u32]offset_s32(svbool_t pg, const uint16_t *base, svuint32_t offsets)
+    ///   LD1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+    ///
+    /// codegenarm64test: same INS_sve_ld1h cases as the first overload above.
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<int> mask, ushort* address, Vector<uint> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets);
+
+    /// <summary>
+    /// svint64_t svld1uh_gather_[s64]offset_s64(svbool_t pg, const uint16_t *base, svint64_t offsets)
+    ///   LD1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+    ///
+    /// codegenarm64test: same INS_sve_ld1h cases as the first overload above.
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<long> mask, ushort* address, Vector<long> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets);
+
+    /// <summary>
+    /// svint64_t svld1uh_gather_[u64]offset_s64(svbool_t pg, const uint16_t *base, svuint64_t offsets)
+    ///   LD1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+    ///
+    /// codegenarm64test: same INS_sve_ld1h cases as the first overload above.
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<long> mask, ushort* address, Vector<ulong> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets);
+
+    /// <summary>
+    /// svuint32_t svld1uh_gather_[s32]offset_u32(svbool_t pg, const uint16_t *base, svint32_t offsets)
+    ///   LD1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+    ///
+    /// codegenarm64test: same INS_sve_ld1h cases as the first overload above.
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<uint> mask, ushort* address, Vector<int> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets);
+
+    /// <summary>
+    /// svuint32_t svld1uh_gather_[u32]offset_u32(svbool_t pg, const uint16_t *base, svuint32_t offsets)
+    ///   LD1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+    ///
+    /// codegenarm64test: same INS_sve_ld1h cases as the first overload above.
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<uint> mask, ushort* address, Vector<uint> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets);
+
+    /// <summary>
+    /// svuint64_t svld1uh_gather_[s64]offset_u64(svbool_t pg, const uint16_t *base, svint64_t offsets)
+    ///   LD1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+    ///
+    /// codegenarm64test: same INS_sve_ld1h cases as the first overload above.
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<ulong> mask, ushort* address, Vector<long> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets);
+
+    /// <summary>
+    /// svuint64_t svld1uh_gather_[u64]offset_u64(svbool_t pg, const uint16_t *base, svuint64_t offsets)
+    ///   LD1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+    ///
+    /// codegenarm64test: same INS_sve_ld1h cases as the first overload above.
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt16WithByteOffsetsZeroExtend(Vector<ulong> mask, ushort* address, Vector<ulong> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtend(mask, address, offsets);
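+    // Illustrative usage sketch (not part of the generated surface): the
+    // "WithByteOffsets" gathers above take raw byte offsets (no implicit LSL #1),
+    // while the GatherVectorUInt16ZeroExtend overloads further below take element
+    // indices that the JIT scales by the element size. Both calls here read the same
+    // ushort elements. CreateTrueMaskInt32 is assumed from this same API surface.
+    private static unsafe void ExampleByteOffsetsVersusIndices(ushort* data, Vector<uint> indices)
+    {
+        Vector<int> mask = CreateTrueMaskInt32();
+        Vector<uint> byteOffsets = indices + indices;   // index i -> byte offset 2*i
+        Vector<int> viaOffsets = GatherVectorUInt16WithByteOffsetsZeroExtend(mask, data, byteOffsets);
+        Vector<int> viaIndices = GatherVectorUInt16ZeroExtend(mask, data, indices);
+        // viaOffsets and viaIndices hold identical elements here.
+    }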
+
+    /// GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting : Load 16-bit data and zero-extend, first-faulting
+
+    /// <summary>
+    /// svint32_t svldff1uh_gather_[s32]offset_s32(svbool_t pg, const uint16_t *base, svint32_t offsets)
+    ///   LDFF1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_HW_4A   LDFF1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+    ///    IF_SVE_HW_4A_A LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+    ///    IF_SVE_HW_4A_B LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+    ///    IF_SVE_HW_4A_C LDFF1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+    ///    IF_SVE_HW_4B   LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V0, REG_P2, REG_R6, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    ///    IF_SVE_HW_4B_D LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_IG_4A_G LDFF1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #1}]
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    ///    IF_SVE_HX_3A_E LDFF1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+    ///        theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_S);
+    ///        theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_D);
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<int> mask, ushort* address, Vector<int> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets);
+
+    /// <summary>
+    /// svint32_t svldff1uh_gather_[u32]offset_s32(svbool_t pg, const uint16_t *base, svuint32_t offsets)
+    ///   LDFF1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+    ///
+    /// codegenarm64test: same INS_sve_ldff1h cases as the first overload above.
+    /// </summary>
+    public static unsafe Vector<int> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<int> mask, ushort* address, Vector<uint> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets);
+
+    /// <summary>
+    /// svint64_t svldff1uh_gather_[s64]offset_s64(svbool_t pg, const uint16_t *base, svint64_t offsets)
+    ///   LDFF1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+    ///
+    /// codegenarm64test: same INS_sve_ldff1h cases as the first overload above.
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<long> mask, ushort* address, Vector<long> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets);
+
+    /// <summary>
+    /// svint64_t svldff1uh_gather_[u64]offset_s64(svbool_t pg, const uint16_t *base, svuint64_t offsets)
+    ///   LDFF1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+    ///
+    /// codegenarm64test: same INS_sve_ldff1h cases as the first overload above.
+    /// </summary>
+    public static unsafe Vector<long> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<long> mask, ushort* address, Vector<ulong> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets);
+
+    /// <summary>
+    /// svuint32_t svldff1uh_gather_[s32]offset_u32(svbool_t pg, const uint16_t *base, svint32_t offsets)
+    ///   LDFF1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW]
+    ///
+    /// codegenarm64test: same INS_sve_ldff1h cases as the first overload above.
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<uint> mask, ushort* address, Vector<int> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets);
+
+    /// <summary>
+    /// svuint32_t svldff1uh_gather_[u32]offset_u32(svbool_t pg, const uint16_t *base, svuint32_t offsets)
+    ///   LDFF1H Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW]
+    ///
+    /// codegenarm64test: same INS_sve_ldff1h cases as the first overload above.
+    /// </summary>
+    public static unsafe Vector<uint> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<uint> mask, ushort* address, Vector<uint> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets);
+
+    /// <summary>
+    /// svuint64_t svldff1uh_gather_[s64]offset_u64(svbool_t pg, const uint16_t *base, svint64_t offsets)
+    ///   LDFF1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+    ///
+    /// codegenarm64test: same INS_sve_ldff1h cases as the first overload above.
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<ulong> mask, ushort* address, Vector<long> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets);
+
+    /// <summary>
+    /// svuint64_t svldff1uh_gather_[u64]offset_u64(svbool_t pg, const uint16_t *base, svuint64_t offsets)
+    ///   LDFF1H Zresult.D, Pg/Z, [Xbase, Zoffsets.D]
+    ///
+    /// codegenarm64test: same INS_sve_ldff1h cases as the first overload above.
+    /// </summary>
+    public static unsafe Vector<ulong> GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(Vector<ulong> mask, ushort* address, Vector<ulong> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets);
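+    // Illustrative usage sketch (not part of the generated surface): a speculative
+    // gather where trailing offsets may point past the end of a buffer. The
+    // first-faulting form keeps the lanes that loaded and deactivates the rest in the
+    // FFR rather than faulting, so a loop can consume the valid prefix and retry;
+    // how the FFR is read back is assumed to be covered elsewhere in this surface.
+    // CreateTrueMaskInt32 is likewise assumed from this same API surface.
+    private static unsafe Vector<int> ExampleSpeculativeGatherUInt16(ushort* data, Vector<int> byteOffsets)
+    {
+        Vector<int> mask = CreateTrueMaskInt32();
+        return GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting(mask, data, byteOffsets);
+    }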
INS_SCALABLE_OPTS_LSL_N);
+        ///   IF_SVE_HW_4B_D   LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D);
+        ///   IF_SVE_IK_4A_I   LD1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///   IF_SVE_HX_3A_E   LD1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorUInt16ZeroExtend(Vector<int> mask, ushort* address, Vector<int> indices) => GatherVectorUInt16ZeroExtend(mask, address, indices);
+
+        /// <summary>
+        /// svint32_t svld1uh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+        ///   LD1H Zresult.S, Pg/Z, [Zbases.S, #0]
+        ///
+        /// codegenarm64test:
+        ///   (same LD1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorUInt16ZeroExtend(Vector<int> mask, Vector<uint> addresses) => GatherVectorUInt16ZeroExtend(mask, addresses);
+
+        /// <summary>
+        /// svint32_t svld1uh_gather_[u32]index_s32(svbool_t pg, const uint16_t *base, svuint32_t indices)
+        ///   LD1H Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]
+        ///
+        /// codegenarm64test:
+        ///   (same LD1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorUInt16ZeroExtend(Vector<int> mask, ushort* address, Vector<uint> indices) => GatherVectorUInt16ZeroExtend(mask, address, indices);
+
+        /// <summary>
+        /// svint64_t svld1uh_gather_[s64]index_s64(svbool_t pg, const uint16_t *base, svint64_t indices)
+        ///   LD1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+        ///
+        /// codegenarm64test:
+        ///   (same LD1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorUInt16ZeroExtend(Vector<long> mask, ushort* address, Vector<long> indices) => GatherVectorUInt16ZeroExtend(mask, address, indices);
+
+        /// <summary>
+        /// svint64_t svld1uh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+        ///   LD1H Zresult.D, Pg/Z, [Zbases.D, #0]
+        ///
+        /// codegenarm64test:
+        ///   (same LD1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorUInt16ZeroExtend(Vector<long> mask, Vector<ulong> addresses) => GatherVectorUInt16ZeroExtend(mask, addresses);
+
+        /// <summary>
+        /// svint64_t svld1uh_gather_[u64]index_s64(svbool_t pg, const uint16_t *base, svuint64_t indices)
+        ///   LD1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+        ///
+        /// codegenarm64test:
+        ///   (same LD1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorUInt16ZeroExtend(Vector<long> mask, ushort* address, Vector<ulong> indices) => GatherVectorUInt16ZeroExtend(mask, address, indices);
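+
+        // Example (illustrative sketch, not part of the generated surface): using the
+        // pointer-plus-indices overload above to gather zero-extended 16-bit elements.
+        // Assumes Sve.IsSupported has been checked; "table"/"indices" are hypothetical
+        // caller data, and CreateTrueMaskInt64 is the all-active predicate helper from
+        // this API surface.
+        //
+        //     static unsafe Vector<long> GatherU16(ushort* table, Vector<long> indices)
+        //     {
+        //         Vector<long> mask = Sve.CreateTrueMaskInt64();
+        //         // Emits LD1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]: each active
+        //         // lane loads table[indices[i]] and zero-extends it to 64 bits.
+        //         return Sve.GatherVectorUInt16ZeroExtend(mask, table, indices);
+        //     }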
+
+        /// <summary>
+        /// svuint32_t svld1uh_gather_[s32]index_u32(svbool_t pg, const uint16_t *base, svint32_t indices)
+        ///   LD1H Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]
+        ///
+        /// codegenarm64test:
+        ///   (same LD1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorUInt16ZeroExtend(Vector<uint> mask, ushort* address, Vector<int> indices) => GatherVectorUInt16ZeroExtend(mask, address, indices);
+
+        /// <summary>
+        /// svuint32_t svld1uh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+        ///   LD1H Zresult.S, Pg/Z, [Zbases.S, #0]
+        ///
+        /// codegenarm64test:
+        ///   (same LD1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorUInt16ZeroExtend(Vector<uint> mask, Vector<uint> addresses) => GatherVectorUInt16ZeroExtend(mask, addresses);
+
+        /// <summary>
+        /// svuint32_t svld1uh_gather_[u32]index_u32(svbool_t pg, const uint16_t *base, svuint32_t indices)
+        ///   LD1H Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]
+        ///
+        /// codegenarm64test:
+        ///   (same LD1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorUInt16ZeroExtend(Vector<uint> mask, ushort* address, Vector<uint> indices) => GatherVectorUInt16ZeroExtend(mask, address, indices);
+
+        /// <summary>
+        /// svuint64_t svld1uh_gather_[s64]index_u64(svbool_t pg, const uint16_t *base, svint64_t indices)
+        ///   LD1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+        ///
+        /// codegenarm64test:
+        ///   (same LD1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtend(Vector<ulong> mask, ushort* address, Vector<long> indices) => GatherVectorUInt16ZeroExtend(mask, address, indices);
+
+        /// <summary>
+        /// svuint64_t svld1uh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+        ///   LD1H Zresult.D, Pg/Z, [Zbases.D, #0]
+        ///
+        /// codegenarm64test:
+        ///   (same LD1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtend(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorUInt16ZeroExtend(mask, addresses);
+
+        /// <summary>
+        /// svuint64_t svld1uh_gather_[u64]index_u64(svbool_t pg, const uint16_t *base, svuint64_t indices)
+        ///   LD1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+        ///
+        /// codegenarm64test:
+        ///   (same LD1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtend(Vector<ulong> mask, ushort* address, Vector<ulong> indices) => GatherVectorUInt16ZeroExtend(mask, address, indices);
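+
+        // Example (illustrative sketch): the Vector<ulong> "addresses" overloads take a
+        // per-lane vector of absolute addresses instead of base pointer plus indices,
+        // mapping to the [Zbases.D, #0] form of LD1H. Hypothetical usage, assuming the
+        // caller has already materialized one address per lane:
+        //
+        //     static unsafe Vector<ulong> GatherFromAddresses(Vector<ulong> laneAddresses)
+        //     {
+        //         Vector<ulong> mask = Sve.CreateTrueMaskUInt64();
+        //         // Inactive lanes of the result are zeroed (/Z predication).
+        //         return Sve.GatherVectorUInt16ZeroExtend(mask, laneAddresses);
+        //     }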
+
+
+        /// GatherVectorUInt16ZeroExtendFirstFaulting : Load 16-bit data and zero-extend, first-faulting
+
+        /// <summary>
+        /// svint32_t svldff1uh_gather_[s32]index_s32(svbool_t pg, const uint16_t *base, svint32_t indices)
+        ///   LDFF1H Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]
+        ///
+        /// codegenarm64test:
+        ///   IF_SVE_HW_4A     LDFF1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///   IF_SVE_HW_4A_A   LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///   IF_SVE_HW_4A_B   LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+        ///   IF_SVE_HW_4A_C   LDFF1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+        ///   IF_SVE_HW_4B     LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V0, REG_P2, REG_R6, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///   IF_SVE_HW_4B_D   LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V5, INS_OPTS_SCALABLE_D);
+        ///   IF_SVE_IG_4A_G   LDFF1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #1}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///   IF_SVE_HX_3A_E   LDFF1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<int> mask, ushort* address, Vector<int> indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices);
+
+        /// <summary>
+        /// svint32_t svldff1uh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+        ///   LDFF1H Zresult.S, Pg/Z, [Zbases.S, #0]
+        ///
+        /// codegenarm64test:
+        ///   (same LDFF1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<int> mask, Vector<uint> addresses) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, addresses);
+
+        /// <summary>
+        /// svint32_t svldff1uh_gather_[u32]index_s32(svbool_t pg, const uint16_t *base, svuint32_t indices)
+        ///   LDFF1H Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]
+        ///
+        /// codegenarm64test:
+        ///   (same LDFF1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<int> mask, ushort* address, Vector<uint> indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices);
+
+        /// <summary>
+        /// svint64_t svldff1uh_gather_[s64]index_s64(svbool_t pg, const uint16_t *base, svint64_t indices)
+        ///   LDFF1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+        ///
+        /// codegenarm64test:
+        ///   (same LDFF1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<long> mask, ushort* address, Vector<long> indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices);
+
+        /// <summary>
+        /// svint64_t svldff1uh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+        ///   LDFF1H Zresult.D, Pg/Z, [Zbases.D, #0]
+        ///
+        /// codegenarm64test:
+        ///   (same LDFF1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<long> mask, Vector<ulong> addresses) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, addresses);
+
+        /// <summary>
+        /// svint64_t svldff1uh_gather_[u64]index_s64(svbool_t pg, const uint16_t *base, svuint64_t indices)
+        ///   LDFF1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+        ///
+        /// codegenarm64test:
+        ///   (same LDFF1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<long> mask, ushort* address, Vector<ulong> indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices);
+
+        /// <summary>
+        /// svuint32_t svldff1uh_gather_[s32]index_u32(svbool_t pg, const uint16_t *base, svint32_t indices)
+        ///   LDFF1H Zresult.S, Pg/Z, [Xbase, Zindices.S, SXTW #1]
+        ///
+        /// codegenarm64test:
+        ///   (same LDFF1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<uint> mask, ushort* address, Vector<int> indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices);
+
+        /// <summary>
+        /// svuint32_t svldff1uh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+        ///   LDFF1H Zresult.S, Pg/Z, [Zbases.S, #0]
+        ///
+        /// codegenarm64test:
+        ///   (same LDFF1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<uint> mask, Vector<uint> addresses) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, addresses);
+
+        /// <summary>
+        /// svuint32_t svldff1uh_gather_[u32]index_u32(svbool_t pg, const uint16_t *base, svuint32_t indices)
+        ///   LDFF1H Zresult.S, Pg/Z, [Xbase, Zindices.S, UXTW #1]
+        ///
+        /// codegenarm64test:
+        ///   (same LDFF1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<uint> mask, ushort* address, Vector<uint> indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices);
+
+        /// <summary>
+        /// svuint64_t svldff1uh_gather_[s64]index_u64(svbool_t pg, const uint16_t *base, svint64_t indices)
+        ///   LDFF1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+        ///
+        /// codegenarm64test:
+        ///   (same LDFF1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<ulong> mask, ushort* address, Vector<long> indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices);
+
+        /// <summary>
+        /// svuint64_t svldff1uh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+        ///   LDFF1H Zresult.D, Pg/Z, [Zbases.D, #0]
+        ///
+        /// codegenarm64test:
+        ///   (same LDFF1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, addresses);
+
+        /// <summary>
+        /// svuint64_t svldff1uh_gather_[u64]index_u64(svbool_t pg, const uint16_t *base, svuint64_t indices)
+        ///   LDFF1H Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #1]
+        ///
+        /// codegenarm64test:
+        ///   (same LDFF1H encodings as the listing above)
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtendFirstFaulting(Vector<ulong> mask, ushort* address, Vector<ulong> indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices);
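+
+        // Example (illustrative sketch): a first-faulting gather only faults on the first
+        // active lane; a fault on any later lane instead clears that lane's bit in the FFR,
+        // so speculative loops can probe memory safely. SetFfr/GetFfrInt32/ConditionalSelect
+        // are assumed to be the companion helpers from this API surface.
+        //
+        //     static unsafe Vector<int> ProbeGather(ushort* table, Vector<int> indices)
+        //     {
+        //         Sve.SetFfr(Sve.CreateTrueMaskByte());   // arm the first-fault register
+        //         Vector<int> mask = Sve.CreateTrueMaskInt32();
+        //         Vector<int> data = Sve.GatherVectorUInt16ZeroExtendFirstFaulting(mask, table, indices);
+        //         Vector<int> loaded = Sve.GetFfrInt32(); // lanes that actually loaded
+        //         return Sve.ConditionalSelect(loaded, data, Vector<int>.Zero);
+        //     }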
REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1H {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1H {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1H {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V0, REG_P2, REG_R6, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1H {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_G LDFF1H {.H }, /Z, [{, , LSL #1}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1H {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtendFirstFaulting(mask, address, indices); + + + /// GatherVectorUInt32WithByteOffsetsZeroExtend : Load 32-bit data and zero-extend + + /// + /// svint64_t svld1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, 
INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svint64_t svld1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, 
INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svint64_t svld1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, 
REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svint64_t svld1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, 
REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svuint64_t svld1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, 
Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svuint64_t svld1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svuint64_t svld1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// 
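+        ///
+        /// Editorial sketch, not generated output: the "WithByteOffsets" overloads in this group
+        /// take unscaled byte offsets (the LD1W form above uses [Xbase, Zoffsets.D] with no LSL),
+        /// so offsets addressing 32-bit data are normally multiples of 4. The element types and
+        /// locals below are assumptions, since the generic parameters were stripped from this listing:
+        ///   Vector<ulong> wide = Sve.GatherVectorUInt32WithByteOffsetsZeroExtend(mask, basePtr, byteOffsets);
+        ///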
theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + /// + /// svuint64_t svld1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// LD1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtend(mask, address, offsets); + + + /// GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting : Load 32-bit data and zero-extend, first-faulting + + /// + /// svint64_t svldff1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, 
EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, 
/Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector 
GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, 
/Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, 
INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, 
EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, address, offsets); + + + /// 
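+        ///
+        /// Editorial sketch, not generated output: the first-faulting group above differs from the
+        /// non-faulting one only in fault behavior; LDFF1W guarantees the first active element and
+        /// records later faulting lanes in the FFR instead of trapping. Element types and locals
+        /// are assumptions (the generic parameters were stripped from this listing):
+        ///   Vector<ulong> wide = Sve.GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting(mask, basePtr, byteOffsets);
+        ///   Vector<ulong> ffr  = Sve.GetFfrUInt64();   // assumed FFR accessor; consult it before trusting all lanes
+        ///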
GatherVectorUInt32ZeroExtend : Load 32-bit data and zero-extend + + /// + /// svint64_t svld1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtend(mask, address, indices); + + /// + /// svint64_t svld1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// LD1W Zresult.D, Pg/Z, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, 
INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q);
+ /// IF_SVE_HW_4A LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_II_4A_H LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LD1W {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorUInt32ZeroExtend(Vector<long> mask, Vector<ulong> addresses) => GatherVectorUInt32ZeroExtend(mask, addresses);
+
+ /// <summary>
+ /// svint64_t svld1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+ /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ /// codegenarm64test: covered by the ld1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorUInt32ZeroExtend(Vector<long> mask, uint* address, Vector<ulong> indices) => GatherVectorUInt32ZeroExtend(mask, address, indices);
+
+ /// <summary>
+ /// svint64_t svld1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
+ /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ /// codegenarm64test: covered by the ld1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorUInt32ZeroExtend(Vector<long> mask, uint* address, Vector<long> indices) => GatherVectorUInt32ZeroExtend(mask, address, indices);
+
+ /// <summary>
+ /// svint64_t svld1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ /// LD1W Zresult.D, Pg/Z, [Zbases.D, #0]
+ /// codegenarm64test: covered by the ld1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorUInt32ZeroExtend(Vector<long> mask, Vector<ulong> addresses) => GatherVectorUInt32ZeroExtend(mask, addresses);
+
+ /// <summary>
+ /// svint64_t svld1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+ /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ /// codegenarm64test: covered by the ld1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorUInt32ZeroExtend(Vector<long> mask, uint* address, Vector<ulong> indices) => GatherVectorUInt32ZeroExtend(mask, address, indices);
+
+ /// <summary>
+ /// svuint64_t svld1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
+ /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ /// codegenarm64test: covered by the ld1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtend(Vector<ulong> mask, uint* address, Vector<long> indices) => GatherVectorUInt32ZeroExtend(mask, address, indices);
+
+ /// <summary>
+ /// svuint64_t svld1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ /// LD1W Zresult.D, Pg/Z, [Zbases.D, #0]
+ /// codegenarm64test: covered by the ld1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtend(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorUInt32ZeroExtend(mask, addresses);
+
+ /// <summary>
+ /// svuint64_t svld1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+ /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ /// codegenarm64test: covered by the ld1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtend(Vector<ulong> mask, uint* address, Vector<ulong> indices) => GatherVectorUInt32ZeroExtend(mask, address, indices);
+
+ /// <summary>
+ /// svuint64_t svld1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
+ /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ /// codegenarm64test: covered by the ld1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtend(Vector<ulong> mask, uint* address, Vector<long> indices) => GatherVectorUInt32ZeroExtend(mask, address, indices);
+
+ /// <summary>
+ /// svuint64_t svld1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ /// LD1W Zresult.D, Pg/Z, [Zbases.D, #0]
+ /// codegenarm64test: covered by the ld1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtend(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorUInt32ZeroExtend(mask, addresses);
+
+ /// <summary>
+ /// svuint64_t svld1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+ /// LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ /// codegenarm64test: covered by the ld1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtend(Vector<ulong> mask, uint* address, Vector<ulong> indices) => GatherVectorUInt32ZeroExtend(mask, address, indices);
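+ // The overloads above come in two shapes: (mask, addresses) gathers through a vector
+ // of 64-bit absolute addresses, while (mask, address, indices) scales each 64-bit
+ // index by 4 and adds it to the scalar base. A minimal usage sketch (illustrative
+ // only, not produced by the generator; assumes an SVE-capable core and the proposed
+ // Sve.CreateTrueMaskUInt64 helper; Vector<T> is System.Numerics.Vector<T>):
+ private static unsafe Vector<ulong> GatherUInt32Example(uint* table, Vector<ulong> indices)
+ {
+     // Predicate with every lane active; inactive lanes would be zeroed instead.
+     Vector<ulong> mask = Sve.CreateTrueMaskUInt64();
+     // Each lane loads table[indices[i]] and zero-extends the 32-bit value to 64 bits:
+     // LD1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+     return Sve.GatherVectorUInt32ZeroExtend(mask, table, indices);
+ }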
+
+ /// GatherVectorUInt32ZeroExtendFirstFaulting : Load 32-bit data and zero-extend, first-faulting
+
+ /// <summary>
+ /// svint64_t svldff1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
+ /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HW_4A LDFF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LDFF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IG_4A_F LDFF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #2}]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LDFF1W {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<long> mask, uint* address, Vector<long> indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svint64_t svldff1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ /// LDFF1W Zresult.D, Pg/Z, [Zbases.D, #0]
+ /// codegenarm64test: covered by the ldff1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<long> mask, Vector<ulong> addresses) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, addresses);
+
+ /// <summary>
+ /// svint64_t svldff1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+ /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ /// codegenarm64test: covered by the ldff1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<long> mask, uint* address, Vector<ulong> indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svint64_t svldff1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices)
+ /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ /// codegenarm64test: covered by the ldff1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<long> mask, uint* address, Vector<long> indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svint64_t svldff1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+ /// LDFF1W Zresult.D, Pg/Z, [Zbases.D, #0]
+ /// codegenarm64test: covered by the ldff1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<long> mask, Vector<ulong> addresses) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, addresses);
+
+ /// <summary>
+ /// svint64_t svldff1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+ /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ /// codegenarm64test: covered by the ldff1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<long> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<long> mask, uint* address, Vector<ulong> indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svuint64_t svldff1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
+ /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ /// codegenarm64test: covered by the ldff1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<ulong> mask, uint* address, Vector<long> indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svuint64_t svldff1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ /// LDFF1W Zresult.D, Pg/Z, [Zbases.D, #0]
+ /// codegenarm64test: covered by the ldff1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, addresses);
+
+ /// <summary>
+ /// svuint64_t svldff1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+ /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ /// codegenarm64test: covered by the ldff1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<ulong> mask, uint* address, Vector<ulong> indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svuint64_t svldff1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices)
+ /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ /// codegenarm64test: covered by the ldff1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<ulong> mask, uint* address, Vector<long> indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices);
+
+ /// <summary>
+ /// svuint64_t svldff1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+ /// LDFF1W Zresult.D, Pg/Z, [Zbases.D, #0]
+ /// codegenarm64test: covered by the ldff1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, addresses);
+
+ /// <summary>
+ /// svuint64_t svldff1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices)
+ /// LDFF1W Zresult.D, Pg/Z, [Xbase, Zindices.D, LSL #2]
+ /// codegenarm64test: covered by the ldff1w cases listed above.
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtendFirstFaulting(Vector<ulong> mask, uint* address, Vector<ulong> indices) => GatherVectorUInt32ZeroExtendFirstFaulting(mask, address, indices);
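+ // First-faulting gathers only take a fault on the first active element; a fault on a
+ // later element instead truncates the load and records progress in the first-fault
+ // register (FFR). A sketch of the usual prime/load/inspect pattern (illustrative only;
+ // assumes the proposed Sve.SetFfr/GetFfrUInt64/ConditionalSelect surface):
+ private static unsafe Vector<ulong> GatherWithFfrExample(uint* table, Vector<ulong> indices, Vector<ulong> mask)
+ {
+     Sve.SetFfr(Sve.CreateTrueMaskUInt64());           // prime the FFR before the load
+     Vector<ulong> data = Sve.GatherVectorUInt32ZeroExtendFirstFaulting(mask, table, indices);
+     Vector<ulong> loaded = Sve.GetFfrUInt64();        // lanes that actually completed
+     // A caller would inspect 'loaded' and retry the remaining lanes as needed.
+     return Sve.ConditionalSelect(loaded, data, Vector<ulong>.Zero);
+ }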
INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svint32_t svldff1_gather_[u32]offset[_s32](svbool_t pg, const int32_t *base, svuint32_t offsets) + /// LDFF1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, 
INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, int* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1_gather_[s64]offset[_s64](svbool_t pg, const int64_t *base, svint64_t offsets) + /// LDFF1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1D {.D }, /Z, [{, , LSL #3}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V5, REG_P6, REG_R7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V2, REG_P6, REG_R5, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1d, EA_SCALABLE, REG_V7, REG_P3, REG_V1, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, long* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svint64_t svldff1_gather_[u64]offset[_s64](svbool_t pg, const int64_t *base, svuint64_t offsets) + /// LDFF1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, 
INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1D {.D }, /Z, [{, , LSL #3}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V5, REG_P6, REG_R7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V2, REG_P6, REG_R5, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1d, EA_SCALABLE, REG_V7, REG_P3, REG_V1, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, long* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svuint32_t svldff1_gather_[s32]offset[_u32](svbool_t pg, const uint32_t *base, svint32_t offsets) + /// LDFF1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// 
theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svuint32_t svldff1_gather_[u32]offset[_u32](svbool_t pg, const uint32_t *base, svuint32_t offsets) + /// LDFF1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, uint* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1_gather_[s64]offset[_u64](svbool_t pg, const uint64_t *base, svint64_t offsets) + /// LDFF1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, 
INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1D {.D }, /Z, [{, , LSL #3}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V5, REG_P6, REG_R7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V2, REG_P6, REG_R5, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1d, EA_SCALABLE, REG_V7, REG_P3, REG_V1, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, ulong* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svuint64_t svldff1_gather_[u64]offset[_u64](svbool_t pg, const uint64_t *base, svuint64_t offsets) + /// LDFF1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1D {.D }, /Z, [{, , LSL #3}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V5, REG_P6, REG_R7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V2, REG_P6, REG_R5, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1d, EA_SCALABLE, REG_V7, REG_P3, REG_V1, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, ulong* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svfloat32_t svldff1_gather_[s32]offset[_f32](svbool_t pg, const float32_t *base, svint32_t offsets) + /// LDFF1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, 
[, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, float* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svfloat32_t svldff1_gather_[u32]offset[_f32](svbool_t pg, const float32_t *base, svuint32_t offsets) + /// LDFF1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, float* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svfloat64_t svldff1_gather_[s64]offset[_f64](svbool_t pg, const float64_t *base, svint64_t offsets) + /// LDFF1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1D {.D }, /Z, [{, , LSL #3}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V5, REG_P6, REG_R7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V2, REG_P6, REG_R5, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1d, EA_SCALABLE, REG_V7, REG_P3, REG_V1, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, double* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + /// + /// svfloat64_t svldff1_gather_[u64]offset[_f64](svbool_t pg, const float64_t *base, svuint64_t offsets) + /// LDFF1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1D {.D }, /Z, [{, , LSL #3}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V5, REG_P6, REG_R7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V2, REG_P6, REG_R5, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1d, EA_SCALABLE, REG_V7, REG_P3, REG_V1, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, double* address, Vector offsets) => GatherVectorWithByteOffsetFirstFaulting(mask, address, offsets); + + + /// GatherVectorWithByteOffsets : Unextended load + + /// + /// svint32_t svld1_gather_[s32]offset[_s32](svbool_t pg, const int32_t *base, svint32_t offsets) + /// LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, 
INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, int* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets); + + /// + /// svint32_t svld1_gather_[u32]offset[_s32](svbool_t pg, const int32_t *base, svuint32_t offsets) + /// LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, 
INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, int* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets); + + /// + /// svint64_t svld1_gather_[s64]offset[_s64](svbool_t pg, const int64_t *base, svint64_t offsets) + /// LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, long* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets); + + /// + /// svint64_t svld1_gather_[u64]offset[_s64](svbool_t pg, const int64_t *base, svuint64_t offsets) + /// LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, long* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets); + + /// + /// svuint32_t svld1_gather_[s32]offset[_u32](svbool_t pg, const uint32_t *base, svint32_t offsets) + /// LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, 
EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, uint* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets); + + /// + /// svuint32_t svld1_gather_[u32]offset[_u32](svbool_t pg, const uint32_t *base, svuint32_t offsets) + /// LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, 
EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, uint* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets); + + /// + /// svuint64_t svld1_gather_[s64]offset[_u64](svbool_t pg, const uint64_t *base, svint64_t offsets) + /// LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, ulong* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets); + + /// + /// svuint64_t svld1_gather_[u64]offset[_u64](svbool_t pg, const uint64_t *base, svuint64_t offsets) + /// LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, ulong* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets); + + /// + /// svfloat32_t svld1_gather_[s32]offset[_f32](svbool_t pg, const float32_t *base, svint32_t offsets) + /// LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, SXTW] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, float* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets); + + /// + /// svfloat32_t svld1_gather_[u32]offset[_f32](svbool_t pg, const float32_t *base, svuint32_t offsets) + /// LD1W Zresult.S, Pg/Z, [Xbase, Zoffsets.S, UXTW] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// 
theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, float* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets); + + /// + /// svfloat64_t svld1_gather_[s64]offset[_f64](svbool_t pg, const float64_t *base, svint64_t offsets) + /// LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, double* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets); + + /// + /// svfloat64_t svld1_gather_[u64]offset[_f64](svbool_t pg, const float64_t *base, svuint64_t offsets) + /// LD1D Zresult.D, Pg/Z, [Xbase, Zoffsets.D] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, 
REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, double* address, Vector offsets) => GatherVectorWithByteOffsets(mask, address, offsets); + + + /// GetActiveElementCount : Count set predicate bits + + /// + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) + /// CNTP Xresult, Pg, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_DK_3A CNTP , , . + /// theEmitter->emitIns_R_R_R(INS_sve_cntp, EA_8BYTE, REG_R29, REG_P0, REG_P15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DL_2A CNTP , ., + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) => GetActiveElementCount(mask, from); + + /// + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) + /// CNTP Xresult, Pg, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_DK_3A CNTP , , . 
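+ // Editor's usage sketch for the GetActiveElementCount group documented here (not generated
+ // output). GetActiveElementCount maps to CNTP: it returns how many lanes are active in both
+ // the governing mask and 'from'. CreateTrueMaskInt32 and CompareGreaterThan are assumed to
+ // exist elsewhere in this API surface; verify their exact shapes before relying on this.
+ private static ulong CountPositiveLanesSketch(Vector<int> values)
+ {
+     Vector<int> mask = Sve.CreateTrueMaskInt32();                        // assumed helper: all lanes active
+     Vector<int> hits = Sve.CompareGreaterThan(values, Vector<int>.Zero); // per-lane "values > 0" result
+     return Sve.GetActiveElementCount(mask, hits);                        // CNTP Xresult, Pg, Pop
+ }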
+ /// theEmitter->emitIns_R_R_R(INS_sve_cntp, EA_8BYTE, REG_R29, REG_P0, REG_P15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DL_2A CNTP , ., + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) => GetActiveElementCount(mask, from); + + /// + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) + /// CNTP Xresult, Pg, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_DK_3A CNTP , , . + /// theEmitter->emitIns_R_R_R(INS_sve_cntp, EA_8BYTE, REG_R29, REG_P0, REG_P15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DL_2A CNTP , ., + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) => GetActiveElementCount(mask, from); + + /// + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) + /// CNTP Xresult, Pg, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_DK_3A CNTP , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_cntp, EA_8BYTE, REG_R29, REG_P0, REG_P15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DL_2A CNTP , ., + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) => GetActiveElementCount(mask, from); + + /// + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) + /// CNTP Xresult, Pg, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_DK_3A CNTP , , . + /// theEmitter->emitIns_R_R_R(INS_sve_cntp, EA_8BYTE, REG_R29, REG_P0, REG_P15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DL_2A CNTP , ., + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) => GetActiveElementCount(mask, from); + + /// + /// uint64_t svcntp_b16(svbool_t pg, svbool_t op) + /// CNTP Xresult, Pg, Pop.H + /// + /// codegenarm64test: + /// IF_SVE_DK_3A CNTP , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_cntp, EA_8BYTE, REG_R29, REG_P0, REG_P15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DL_2A CNTP , ., + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) => GetActiveElementCount(mask, from); + + /// + /// uint64_t svcntp_b32(svbool_t pg, svbool_t op) + /// CNTP Xresult, Pg, Pop.S + /// + /// codegenarm64test: + /// IF_SVE_DK_3A CNTP , , . + /// theEmitter->emitIns_R_R_R(INS_sve_cntp, EA_8BYTE, REG_R29, REG_P0, REG_P15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DL_2A CNTP , ., + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) => GetActiveElementCount(mask, from); + + /// + /// uint64_t svcntp_b64(svbool_t pg, svbool_t op) + /// CNTP Xresult, Pg, Pop.D + /// + /// codegenarm64test: + /// IF_SVE_DK_3A CNTP , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_cntp, EA_8BYTE, REG_R29, REG_P0, REG_P15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DL_2A CNTP , ., + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) => GetActiveElementCount(mask, from); + + /// + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) + /// CNTP Xresult, Pg, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_DK_3A CNTP , , . + /// theEmitter->emitIns_R_R_R(INS_sve_cntp, EA_8BYTE, REG_R29, REG_P0, REG_P15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DL_2A CNTP , ., + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) => GetActiveElementCount(mask, from); + + /// + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) + /// CNTP Xresult, Pg, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_DK_3A CNTP , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_cntp, EA_8BYTE, REG_R29, REG_P0, REG_P15, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_DL_2A CNTP , .,
+ /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X);
+ /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X);
+ /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X);
+ /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X);
+ /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X);
+ /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X);
+ /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X);
+ /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X);
+ ///
+ public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) => GetActiveElementCount(mask, from);
+
+
+ /// GetFfr : Read FFR, returning predicate of successfully loaded elements
+
+ ///
+ /// svbool_t svrdffr()
+ /// RDFFR Presult.B
+ /// svbool_t svrdffr_z(svbool_t pg)
+ /// RDFFR Presult.B, Pg/Z
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DG_2A RDFFR .B, /Z
+ /// theEmitter->emitIns_R_R(INS_sve_rdffr, EA_SCALABLE, REG_P10, REG_P15, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_DH_1A RDFFR .B
+ /// theEmitter->emitIns_R(INS_sve_rdffr, EA_SCALABLE, REG_P8);
+ ///
+ public static unsafe Vector GetFfr() => GetFfr();
+
+ ///
+ /// svbool_t svrdffr()
+ /// RDFFR Presult.B
+ /// svbool_t svrdffr_z(svbool_t pg)
+ /// RDFFR Presult.B, Pg/Z
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DG_2A RDFFR .B, /Z
+ /// theEmitter->emitIns_R_R(INS_sve_rdffr, EA_SCALABLE, REG_P10, REG_P15, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_DH_1A RDFFR .B
+ /// theEmitter->emitIns_R(INS_sve_rdffr, EA_SCALABLE, REG_P8);
+ ///
+ public static unsafe Vector GetFfr() => GetFfr();
+
+ ///
+ /// svbool_t svrdffr()
+ /// RDFFR Presult.B
+ /// svbool_t svrdffr_z(svbool_t pg)
+ /// RDFFR Presult.B, Pg/Z
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DG_2A RDFFR .B, /Z
+ /// theEmitter->emitIns_R_R(INS_sve_rdffr, EA_SCALABLE, REG_P10, REG_P15, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_DH_1A RDFFR .B
+ /// theEmitter->emitIns_R(INS_sve_rdffr, EA_SCALABLE, REG_P8);
+ ///
+ public static unsafe Vector GetFfr() => GetFfr();
+
+ ///
+ /// svbool_t svrdffr()
+ /// RDFFR Presult.B
+ /// svbool_t svrdffr_z(svbool_t pg)
+ /// RDFFR Presult.B, Pg/Z
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DG_2A RDFFR .B, /Z
+ /// theEmitter->emitIns_R_R(INS_sve_rdffr, EA_SCALABLE, REG_P10, REG_P15, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_DH_1A RDFFR .B
+ /// theEmitter->emitIns_R(INS_sve_rdffr, EA_SCALABLE, REG_P8);
+ ///
+ public static unsafe Vector GetFfr() => GetFfr();
+
+ ///
+ /// svbool_t svrdffr()
+ /// RDFFR Presult.B
+ /// svbool_t svrdffr_z(svbool_t pg)
+ /// RDFFR Presult.B, Pg/Z
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DG_2A RDFFR .B, /Z
+ /// theEmitter->emitIns_R_R(INS_sve_rdffr, EA_SCALABLE, REG_P10, REG_P15, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_DH_1A RDFFR .B
+ /// theEmitter->emitIns_R(INS_sve_rdffr, EA_SCALABLE, REG_P8);
+ ///
+ public static unsafe Vector GetFfr() => GetFfr();
+
+ ///
+ /// svbool_t svrdffr()
+ /// RDFFR Presult.B
+ /// svbool_t svrdffr_z(svbool_t pg)
+ /// RDFFR Presult.B, Pg/Z
+
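+ /// Example (editorial sketch, not generated output; generics assumed):
+ ///   Vector&lt;byte&gt; ffr = Sve.GetFfr();
+ /// after a non-faulting load such as LoadVectorByteNonFaultingZeroExtendToInt32,
+ /// the returned predicate has a set bit for each element that loaded successfully.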
/// + /// codegenarm64test: + /// IF_SVE_DG_2A RDFFR .B, /Z + /// theEmitter->emitIns_R_R(INS_sve_rdffr, EA_SCALABLE, REG_P10, REG_P15, INS_OPTS_SCALABLE_B); + /// IF_SVE_DH_1A RDFFR .B + /// theEmitter->emitIns_R(INS_sve_rdffr, EA_SCALABLE, REG_P8); + /// + public static unsafe Vector GetFfr() => GetFfr(); + + /// + /// svbool_t svrdffr() + /// RDFFR Presult.B + /// svbool_t svrdffr_z(svbool_t pg) + /// RDFFR Presult.B, Pg/Z + /// + /// codegenarm64test: + /// IF_SVE_DG_2A RDFFR .B, /Z + /// theEmitter->emitIns_R_R(INS_sve_rdffr, EA_SCALABLE, REG_P10, REG_P15, INS_OPTS_SCALABLE_B); + /// IF_SVE_DH_1A RDFFR .B + /// theEmitter->emitIns_R(INS_sve_rdffr, EA_SCALABLE, REG_P8); + /// + public static unsafe Vector GetFfr() => GetFfr(); + + /// + /// svbool_t svrdffr() + /// RDFFR Presult.B + /// svbool_t svrdffr_z(svbool_t pg) + /// RDFFR Presult.B, Pg/Z + /// + /// codegenarm64test: + /// IF_SVE_DG_2A RDFFR .B, /Z + /// theEmitter->emitIns_R_R(INS_sve_rdffr, EA_SCALABLE, REG_P10, REG_P15, INS_OPTS_SCALABLE_B); + /// IF_SVE_DH_1A RDFFR .B + /// theEmitter->emitIns_R(INS_sve_rdffr, EA_SCALABLE, REG_P8); + /// + public static unsafe Vector GetFfr() => GetFfr(); + + + /// InsertIntoShiftedVector : Insert scalar into shifted vector + + /// + /// svint8_t svinsr[_n_s8](svint8_t op1, int8_t op2) + /// INSR Ztied1.B, Wop2 + /// INSR Ztied1.B, Bop2 + /// + /// codegenarm64test: + /// sve_insr - not implemented in coreclr + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, sbyte right) => InsertIntoShiftedVector(left, right); + + /// + /// svint16_t svinsr[_n_s16](svint16_t op1, int16_t op2) + /// INSR Ztied1.H, Wop2 + /// INSR Ztied1.H, Hop2 + /// + /// codegenarm64test: + /// sve_insr - not implemented in coreclr + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, short right) => InsertIntoShiftedVector(left, right); + + /// + /// svint32_t svinsr[_n_s32](svint32_t op1, int32_t op2) + /// INSR Ztied1.S, Wop2 + /// INSR Ztied1.S, Sop2 + /// + /// codegenarm64test: + /// sve_insr - not implemented in coreclr + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, int right) => InsertIntoShiftedVector(left, right); + + /// + /// svint64_t svinsr[_n_s64](svint64_t op1, int64_t op2) + /// INSR Ztied1.D, Xop2 + /// INSR Ztied1.D, Dop2 + /// + /// codegenarm64test: + /// sve_insr - not implemented in coreclr + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, long right) => InsertIntoShiftedVector(left, right); + + /// + /// svuint8_t svinsr[_n_u8](svuint8_t op1, uint8_t op2) + /// INSR Ztied1.B, Wop2 + /// INSR Ztied1.B, Bop2 + /// + /// codegenarm64test: + /// sve_insr - not implemented in coreclr + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, byte right) => InsertIntoShiftedVector(left, right); + + /// + /// svuint16_t svinsr[_n_u16](svuint16_t op1, uint16_t op2) + /// INSR Ztied1.H, Wop2 + /// INSR Ztied1.H, Hop2 + /// + /// codegenarm64test: + /// sve_insr - not implemented in coreclr + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, ushort right) => InsertIntoShiftedVector(left, right); + + /// + /// svuint32_t svinsr[_n_u32](svuint32_t op1, uint32_t op2) + /// INSR Ztied1.S, Wop2 + /// INSR Ztied1.S, Sop2 + /// + /// codegenarm64test: + /// sve_insr - not implemented in coreclr + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, uint right) => InsertIntoShiftedVector(left, right); + + /// + /// svuint64_t svinsr[_n_u64](svuint64_t op1, uint64_t op2) + 
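+ /// Example (editorial sketch, not generated output; generics assumed): INSR
+ /// shifts every element up one lane, drops the old top lane, and places the
+ /// scalar in lane 0:
+ ///   Vector&lt;ulong&gt; shifted = Sve.InsertIntoShiftedVector(v, 42UL);
+ /// so shifted[0] == 42 and shifted[i] == v[i - 1] for the remaining lanes.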
/// INSR Ztied1.D, Xop2 + /// INSR Ztied1.D, Dop2 + /// + /// codegenarm64test: + /// sve_insr - not implemented in coreclr + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, ulong right) => InsertIntoShiftedVector(left, right); + + /// + /// svfloat32_t svinsr[_n_f32](svfloat32_t op1, float32_t op2) + /// INSR Ztied1.S, Wop2 + /// INSR Ztied1.S, Sop2 + /// + /// codegenarm64test: + /// sve_insr - not implemented in coreclr + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, float right) => InsertIntoShiftedVector(left, right); + + /// + /// svfloat64_t svinsr[_n_f64](svfloat64_t op1, float64_t op2) + /// INSR Ztied1.D, Xop2 + /// INSR Ztied1.D, Dop2 + /// + /// codegenarm64test: + /// sve_insr - not implemented in coreclr + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, double right) => InsertIntoShiftedVector(left, right); + + + /// LeadingSignCount : Count leading sign bits + + /// + /// svuint8_t svcls[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op) + /// CLS Ztied.B, Pg/M, Zop.B + /// MOVPRFX Zresult, Zinactive; CLS Zresult.B, Pg/M, Zop.B + /// svuint8_t svcls[_s8]_x(svbool_t pg, svint8_t op) + /// CLS Ztied.B, Pg/M, Ztied.B + /// MOVPRFX Zresult, Zop; CLS Zresult.B, Pg/M, Zop.B + /// svuint8_t svcls[_s8]_z(svbool_t pg, svint8_t op) + /// MOVPRFX Zresult.B, Pg/Z, Zop.B; CLS Zresult.B, Pg/M, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CLS ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cls, EA_SCALABLE, REG_V31, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LeadingSignCount(Vector value) => LeadingSignCount(value); + + /// + /// svuint16_t svcls[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op) + /// CLS Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; CLS Zresult.H, Pg/M, Zop.H + /// svuint16_t svcls[_s16]_x(svbool_t pg, svint16_t op) + /// CLS Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; CLS Zresult.H, Pg/M, Zop.H + /// svuint16_t svcls[_s16]_z(svbool_t pg, svint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; CLS Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CLS ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cls, EA_SCALABLE, REG_V31, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LeadingSignCount(Vector value) => LeadingSignCount(value); + + /// + /// svuint32_t svcls[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op) + /// CLS Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; CLS Zresult.S, Pg/M, Zop.S + /// svuint32_t svcls[_s32]_x(svbool_t pg, svint32_t op) + /// CLS Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; CLS Zresult.S, Pg/M, Zop.S + /// svuint32_t svcls[_s32]_z(svbool_t pg, svint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; CLS Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CLS ., /M, . 
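+ /// Example (editorial sketch, not generated output; generics assumed): CLS
+ /// counts, per element, the run of bits immediately after the sign bit that
+ /// match it (the sign bit itself is not counted):
+ ///   Vector&lt;uint&gt; counts = Sve.LeadingSignCount(values);   // values : Vector&lt;int&gt;
+ /// e.g. a 32-bit element of -1 or of 0 both give 31.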
+ /// theEmitter->emitIns_R_R_R(INS_sve_cls, EA_SCALABLE, REG_V31, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LeadingSignCount(Vector value) => LeadingSignCount(value); + + /// + /// svuint64_t svcls[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op) + /// CLS Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; CLS Zresult.D, Pg/M, Zop.D + /// svuint64_t svcls[_s64]_x(svbool_t pg, svint64_t op) + /// CLS Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; CLS Zresult.D, Pg/M, Zop.D + /// svuint64_t svcls[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; CLS Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CLS ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cls, EA_SCALABLE, REG_V31, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LeadingSignCount(Vector value) => LeadingSignCount(value); + + + /// LeadingZeroCount : Count leading zero bits + + /// + /// svuint8_t svclz[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op) + /// CLZ Ztied.B, Pg/M, Zop.B + /// MOVPRFX Zresult, Zinactive; CLZ Zresult.B, Pg/M, Zop.B + /// svuint8_t svclz[_s8]_x(svbool_t pg, svint8_t op) + /// CLZ Ztied.B, Pg/M, Ztied.B + /// MOVPRFX Zresult, Zop; CLZ Zresult.B, Pg/M, Zop.B + /// svuint8_t svclz[_s8]_z(svbool_t pg, svint8_t op) + /// MOVPRFX Zresult.B, Pg/Z, Zop.B; CLZ Zresult.B, Pg/M, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CLZ ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_clz, EA_SCALABLE, REG_V30, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LeadingZeroCount(Vector value) => LeadingZeroCount(value); + + /// + /// svuint8_t svclz[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op) + /// CLZ Ztied.B, Pg/M, Zop.B + /// MOVPRFX Zresult, Zinactive; CLZ Zresult.B, Pg/M, Zop.B + /// svuint8_t svclz[_u8]_x(svbool_t pg, svuint8_t op) + /// CLZ Ztied.B, Pg/M, Ztied.B + /// MOVPRFX Zresult, Zop; CLZ Zresult.B, Pg/M, Zop.B + /// svuint8_t svclz[_u8]_z(svbool_t pg, svuint8_t op) + /// MOVPRFX Zresult.B, Pg/Z, Zop.B; CLZ Zresult.B, Pg/M, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CLZ ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_clz, EA_SCALABLE, REG_V30, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LeadingZeroCount(Vector value) => LeadingZeroCount(value); + + /// + /// svuint16_t svclz[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op) + /// CLZ Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; CLZ Zresult.H, Pg/M, Zop.H + /// svuint16_t svclz[_s16]_x(svbool_t pg, svint16_t op) + /// CLZ Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; CLZ Zresult.H, Pg/M, Zop.H + /// svuint16_t svclz[_s16]_z(svbool_t pg, svint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; CLZ Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CLZ ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_clz, EA_SCALABLE, REG_V30, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LeadingZeroCount(Vector value) => LeadingZeroCount(value); + + /// + /// svuint16_t svclz[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) + /// CLZ Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; CLZ Zresult.H, Pg/M, Zop.H + /// svuint16_t svclz[_u16]_x(svbool_t pg, svuint16_t op) + /// CLZ Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; CLZ Zresult.H, Pg/M, Zop.H + /// svuint16_t svclz[_u16]_z(svbool_t pg, svuint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; CLZ Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CLZ ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_clz, EA_SCALABLE, REG_V30, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LeadingZeroCount(Vector value) => LeadingZeroCount(value); + + /// + /// svuint32_t svclz[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op) + /// CLZ Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; CLZ Zresult.S, Pg/M, Zop.S + /// svuint32_t svclz[_s32]_x(svbool_t pg, svint32_t op) + /// CLZ Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; CLZ Zresult.S, Pg/M, Zop.S + /// svuint32_t svclz[_s32]_z(svbool_t pg, svint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; CLZ Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CLZ ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_clz, EA_SCALABLE, REG_V30, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LeadingZeroCount(Vector value) => LeadingZeroCount(value); + + /// + /// svuint32_t svclz[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// CLZ Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; CLZ Zresult.S, Pg/M, Zop.S + /// svuint32_t svclz[_u32]_x(svbool_t pg, svuint32_t op) + /// CLZ Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; CLZ Zresult.S, Pg/M, Zop.S + /// svuint32_t svclz[_u32]_z(svbool_t pg, svuint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; CLZ Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CLZ ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_clz, EA_SCALABLE, REG_V30, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LeadingZeroCount(Vector value) => LeadingZeroCount(value); + + /// + /// svuint64_t svclz[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op) + /// CLZ Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; CLZ Zresult.D, Pg/M, Zop.D + /// svuint64_t svclz[_s64]_x(svbool_t pg, svint64_t op) + /// CLZ Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; CLZ Zresult.D, Pg/M, Zop.D + /// svuint64_t svclz[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; CLZ Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CLZ ., /M, . 
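+ /// Example (editorial sketch, not generated output; generics assumed):
+ ///   Vector&lt;uint&gt; zeros = Sve.LeadingZeroCount(values);   // values : Vector&lt;uint&gt;
+ /// a 32-bit element of 1 gives 31; an element of 0 gives the element width, 32.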
+ /// theEmitter->emitIns_R_R_R(INS_sve_clz, EA_SCALABLE, REG_V30, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LeadingZeroCount(Vector value) => LeadingZeroCount(value); + + /// + /// svuint64_t svclz[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// CLZ Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; CLZ Zresult.D, Pg/M, Zop.D + /// svuint64_t svclz[_u64]_x(svbool_t pg, svuint64_t op) + /// CLZ Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; CLZ Zresult.D, Pg/M, Zop.D + /// svuint64_t svclz[_u64]_z(svbool_t pg, svuint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; CLZ Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CLZ ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_clz, EA_SCALABLE, REG_V30, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LeadingZeroCount(Vector value) => LeadingZeroCount(value); + + + /// LoadVector : Unextended load + + /// + /// svint8_t svld1[_s8](svbool_t pg, const int8_t *base) + /// LD1B Zresult.B, Pg/Z, [Xarray, Xindex] + /// LD1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_E LD1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_H LD1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVector(Vector mask, sbyte* address) => LoadVector(mask, address); + + /// + /// svint16_t svld1[_s16](svbool_t pg, const int16_t *base) + /// LD1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_G LD1H {.H }, 
/Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1H {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1H {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1H {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1H {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1H {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1H {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_I LD1H {.H }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1H {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVector(Vector mask, short* address) => LoadVector(mask, address); + + /// + /// svint32_t svld1[_s32](svbool_t pg, const int32_t *base) + /// LD1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2] + /// LD1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, 
INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVector(Vector mask, int* address) => LoadVector(mask, address); + + /// + /// svint64_t svld1[_s64](svbool_t pg, const int64_t *base) + /// LD1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3] + /// LD1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, 
REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVector(Vector mask, long* address) => LoadVector(mask, address); + + /// + /// svuint8_t svld1[_u8](svbool_t pg, const uint8_t *base) + /// LD1B Zresult.B, Pg/Z, [Xarray, Xindex] + /// LD1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_E LD1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_H LD1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVector(Vector mask, byte* address) => LoadVector(mask, address); + + /// + /// svuint16_t svld1[_u16](svbool_t pg, const uint16_t *base) + /// LD1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_G LD1H {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, 
EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1H {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1H {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1H {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1H {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1H {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1H {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_I LD1H {.H }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1H {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVector(Vector mask, ushort* address) => LoadVector(mask, address); + + /// + /// svuint32_t svld1[_u32](svbool_t pg, const uint32_t *base) + /// LD1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2] + /// LD1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q); + /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A 
LD1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVector(Vector mask, uint* address) => LoadVector(mask, address); + + /// + /// svuint64_t svld1[_u64](svbool_t pg, const uint64_t *base) + /// LD1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3] + /// LD1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q); + /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, 
REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector<ulong> LoadVector(Vector<ulong> mask, ulong* address) => LoadVector(mask, address);
+
+ ///
+ /// svfloat32_t svld1[_f32](svbool_t pg, const float32_t *base)
+ /// LD1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]
+ /// LD1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IH_3A_F LD1W {.S }, /Z, [{, #, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q);
+ /// IF_SVE_HW_4A LD1W {.S }, /Z, [, .S, #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LD1W {.D }, /Z, [, .D, #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LD1W {.D }, /Z, [, .D, ]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LD1W {.S }, /Z, [, .S, ]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1W {.D }, /Z, [, .D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LD1W {.D }, /Z, [, .D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_II_4A_H LD1W {.S }, /Z, [, , LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LD1W {.S }, /Z, [.S{, #}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector<float> LoadVector(Vector<float> mask, float* address) => LoadVector(mask, address);
+
+ ///
+ /// svfloat64_t svld1[_f64](svbool_t pg, const float64_t *base)
+ /// LD1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]
+ /// LD1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IH_3A LD1D {.D }, /Z, [{, #, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V5, REG_P3, REG_R4, 0, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IH_3A_A LD1D {.Q }, /Z, [{, #, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 5, INS_OPTS_SCALABLE_Q);
+ /// IF_SVE_IU_4A LD1D {.D }, /Z, [, .D, #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_IU_4A_C LD1D {.D }, /Z, [, .D, ]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_II_4A LD1D {.D }, /Z, [, , LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_II_4A_B LD1D {.Q }, /Z, [, , LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R4, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B LD1D {.D }, /Z, [, .D, LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_IU_4B_D LD1D {.D }, /Z, [, .D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1d, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IV_3A LD1D {.D }, /Z, [.D{, #}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1d, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector<double> LoadVector(Vector<double> mask, double* address) => LoadVector(mask, address);
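+
+ // Editorial example (not generated output): the canonical predicated-loop use
+ // of LoadVector; CreateWhileLessThanMask32Bit and Count32BitElements are assumed
+ // to be the matching while-construct and element-count helpers in this API.
+ //   for (int i = 0; i < length; i += (int)Sve.Count32BitElements())
+ //   {
+ //       Vector<int> mask = Sve.CreateWhileLessThanMask32Bit(i, length);
+ //       Vector<int> data = Sve.LoadVector(mask, ptr + i);   // LD1W; inactive lanes are zero
+ //   }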
+
+
+ /// LoadVector128AndReplicateToVector : Load and replicate 128 bits of data
+
+ ///
+ /// svint8_t svld1rq[_s8](svbool_t pg, const int8_t *base)
+ /// LD1RQB Zresult.B, Pg/Z, [Xarray, Xindex]
+ /// LD1RQB Zresult.B, Pg/Z, [Xarray, #index]
+ /// LD1RQB Zresult.B, Pg/Z, [Xbase, #0]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IO_3A LD1RQB {.B }, /Z, [{, #}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rqb, EA_SCALABLE, REG_V6, REG_P7, REG_R8, 64, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_IP_4A LD1RQB {.B }, /Z, [, ]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rqb, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R2, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector<sbyte> LoadVector128AndReplicateToVector(Vector<sbyte> mask, sbyte* address) => LoadVector128AndReplicateToVector(mask, address);
+
+ ///
+ /// svint16_t svld1rq[_s16](svbool_t pg, const int16_t *base)
+ /// LD1RQH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]
+ /// LD1RQH Zresult.H, Pg/Z, [Xarray, #index * 2]
+ /// LD1RQH Zresult.H, Pg/Z, [Xbase, #0]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IO_3A LD1RQH {.H }, /Z, [{, #}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rqh, EA_SCALABLE, REG_V4, REG_P5, REG_R6, 112, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_IP_4A LD1RQH {.H }, /Z, [, , LSL #1]
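+ /// Example (editorial sketch, not generated output; generics assumed): LD1RQ
+ /// reads one 128-bit quadword and repeats it across the whole scalable vector,
+ /// e.g. broadcasting a 4-float pattern:
+ ///   Vector&lt;float&gt; pattern = Sve.LoadVector128AndReplicateToVector(mask, ptr);
+ ///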
theEmitter->emitIns_R_R_R_R(INS_sve_ld1rqh, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, short* address) => LoadVector128AndReplicateToVector(mask, address); + + /// + /// svint32_t svld1rq[_s32](svbool_t pg, const int32_t *base) + /// LD1RQW Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2] + /// LD1RQW Zresult.S, Pg/Z, [Xarray, #index * 4] + /// LD1RQW Zresult.S, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1RQW {.S }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rqw, EA_SCALABLE, REG_V31, REG_P2, REG_R1, -16, INS_OPTS_SCALABLE_S); + /// IF_SVE_IP_4A LD1RQW {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rqw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, int* address) => LoadVector128AndReplicateToVector(mask, address); + + /// + /// svint64_t svld1rq[_s64](svbool_t pg, const int64_t *base) + /// LD1RQD Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3] + /// LD1RQD Zresult.D, Pg/Z, [Xarray, #index * 8] + /// LD1RQD Zresult.D, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1RQD {.D }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rqd, EA_SCALABLE, REG_V9, REG_P0, REG_R1, -128, INS_OPTS_SCALABLE_D); + /// IF_SVE_IP_4A LD1RQD {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rqd, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, long* address) => LoadVector128AndReplicateToVector(mask, address); + + /// + /// svuint8_t svld1rq[_u8](svbool_t pg, const uint8_t *base) + /// LD1RQB Zresult.B, Pg/Z, [Xarray, Xindex] + /// LD1RQB Zresult.B, Pg/Z, [Xarray, #index] + /// LD1RQB Zresult.B, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1RQB {.B }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rqb, EA_SCALABLE, REG_V6, REG_P7, REG_R8, 64, INS_OPTS_SCALABLE_B); + /// IF_SVE_IP_4A LD1RQB {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rqb, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, byte* address) => LoadVector128AndReplicateToVector(mask, address); + + /// + /// svuint16_t svld1rq[_u16](svbool_t pg, const uint16_t *base) + /// LD1RQH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD1RQH Zresult.H, Pg/Z, [Xarray, #index * 2] + /// LD1RQH Zresult.H, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1RQH {.H }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rqh, EA_SCALABLE, REG_V4, REG_P5, REG_R6, 112, INS_OPTS_SCALABLE_H); + /// IF_SVE_IP_4A LD1RQH {.H }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rqh, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, ushort* address) => LoadVector128AndReplicateToVector(mask, address); + + /// + /// svuint32_t svld1rq[_u32](svbool_t pg, const uint32_t *base) + /// LD1RQW Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2] + /// LD1RQW Zresult.S, Pg/Z, [Xarray, #index * 4] + /// LD1RQW Zresult.S, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1RQW {.S }, /Z, [{, #}] + 
/// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rqw, EA_SCALABLE, REG_V31, REG_P2, REG_R1, -16, INS_OPTS_SCALABLE_S); + /// IF_SVE_IP_4A LD1RQW {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rqw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, uint* address) => LoadVector128AndReplicateToVector(mask, address); + + /// + /// svuint64_t svld1rq[_u64](svbool_t pg, const uint64_t *base) + /// LD1RQD Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3] + /// LD1RQD Zresult.D, Pg/Z, [Xarray, #index * 8] + /// LD1RQD Zresult.D, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1RQD {.D }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rqd, EA_SCALABLE, REG_V9, REG_P0, REG_R1, -128, INS_OPTS_SCALABLE_D); + /// IF_SVE_IP_4A LD1RQD {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rqd, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, ulong* address) => LoadVector128AndReplicateToVector(mask, address); + + /// + /// svfloat32_t svld1rq[_f32](svbool_t pg, const float32_t *base) + /// LD1RQW Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2] + /// LD1RQW Zresult.S, Pg/Z, [Xarray, #index * 4] + /// LD1RQW Zresult.S, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1RQW {.S }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rqw, EA_SCALABLE, REG_V31, REG_P2, REG_R1, -16, INS_OPTS_SCALABLE_S); + /// IF_SVE_IP_4A LD1RQW {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rqw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, float* address) => LoadVector128AndReplicateToVector(mask, address); + + /// + /// svfloat64_t svld1rq[_f64](svbool_t pg, const float64_t *base) + /// LD1RQD Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3] + /// LD1RQD Zresult.D, Pg/Z, [Xarray, #index * 8] + /// LD1RQD Zresult.D, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1RQD {.D }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rqd, EA_SCALABLE, REG_V9, REG_P0, REG_R1, -128, INS_OPTS_SCALABLE_D); + /// IF_SVE_IP_4A LD1RQD {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rqd, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, double* address) => LoadVector128AndReplicateToVector(mask, address); + + + /// LoadVectorByteNonFaultingZeroExtendToInt16 : Load 8-bit data and zero-extend, non-faulting + + /// + /// svint16_t svldnf1ub_s16(svbool_t pg, const uint8_t *base) + /// LDNF1B Zresult.H, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IL_3A_C LDNF1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static 
unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt16(byte* address) => LoadVectorByteNonFaultingZeroExtendToInt16(address); + + + /// LoadVectorByteNonFaultingZeroExtendToInt32 : Load 8-bit data and zero-extend, non-faulting + + /// + /// svint32_t svldnf1ub_s32(svbool_t pg, const uint8_t *base) + /// LDNF1B Zresult.S, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IL_3A_C LDNF1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt32(byte* address) => LoadVectorByteNonFaultingZeroExtendToInt32(address); + + + /// LoadVectorByteNonFaultingZeroExtendToInt64 : Load 8-bit data and zero-extend, non-faulting + + /// + /// svint64_t svldnf1ub_s64(svbool_t pg, const uint8_t *base) + /// LDNF1B Zresult.D, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IL_3A_C LDNF1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt64(byte* address) => LoadVectorByteNonFaultingZeroExtendToInt64(address); + + + /// LoadVectorByteNonFaultingZeroExtendToUInt16 : Load 8-bit data and zero-extend, non-faulting + + /// + /// svuint16_t svldnf1ub_u16(svbool_t pg, const uint8_t *base) + /// LDNF1B Zresult.H, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IL_3A_C LDNF1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt16(byte* address) => LoadVectorByteNonFaultingZeroExtendToUInt16(address); + + + /// LoadVectorByteNonFaultingZeroExtendToUInt32 : Load 8-bit data and zero-extend, non-faulting + + /// + /// svuint32_t svldnf1ub_u32(svbool_t pg, const uint8_t *base) + /// LDNF1B Zresult.S, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IL_3A_C LDNF1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -2, INS_OPTS_SCALABLE_H); + /// 
theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt32(byte* address) => LoadVectorByteNonFaultingZeroExtendToUInt32(address); + + + /// LoadVectorByteNonFaultingZeroExtendToUInt64 : Load 8-bit data and zero-extend, non-faulting + + /// + /// svuint64_t svldnf1ub_u64(svbool_t pg, const uint8_t *base) + /// LDNF1B Zresult.D, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IL_3A_C LDNF1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt64(byte* address) => LoadVectorByteNonFaultingZeroExtendToUInt64(address); + + + /// LoadVectorByteZeroExtendFirstFaulting : Load 8-bit data and zero-extend, first-faulting + + /// + /// svint16_t svldff1ub_s16(svbool_t pg, const uint8_t *base) + /// LDFF1B Zresult.H, Pg/Z, [Xarray, Xindex] + /// LDFF1B Zresult.H, Pg/Z, [Xbase, XZR] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V2, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_E LDFF1B {.B }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address) => LoadVectorByteZeroExtendFirstFaulting(mask, address); + + /// + /// svint32_t svldff1ub_s32(svbool_t pg, const uint8_t *base) + /// LDFF1B Zresult.S, Pg/Z, [Xarray, Xindex] + /// LDFF1B Zresult.S, Pg/Z, [Xbase, XZR] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A 
LDFF1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V2, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_E LDFF1B {.B }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address) => LoadVectorByteZeroExtendFirstFaulting(mask, address); + + /// + /// svint64_t svldff1ub_s64(svbool_t pg, const uint8_t *base) + /// LDFF1B Zresult.D, Pg/Z, [Xarray, Xindex] + /// LDFF1B Zresult.D, Pg/Z, [Xbase, XZR] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V2, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_E LDFF1B {.B }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address) => LoadVectorByteZeroExtendFirstFaulting(mask, address); + + 
/// + /// svuint16_t svldff1ub_u16(svbool_t pg, const uint8_t *base) + /// LDFF1B Zresult.H, Pg/Z, [Xarray, Xindex] + /// LDFF1B Zresult.H, Pg/Z, [Xbase, XZR] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V2, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_E LDFF1B {.B }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address) => LoadVectorByteZeroExtendFirstFaulting(mask, address); + + /// + /// svuint32_t svldff1ub_u32(svbool_t pg, const uint8_t *base) + /// LDFF1B Zresult.S, Pg/Z, [Xarray, Xindex] + /// LDFF1B Zresult.S, Pg/Z, [Xbase, XZR] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V2, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_E LDFF1B {.B }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, 
REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address) => LoadVectorByteZeroExtendFirstFaulting(mask, address); + + /// + /// svuint64_t svldff1ub_u64(svbool_t pg, const uint8_t *base) + /// LDFF1B Zresult.D, Pg/Z, [Xarray, Xindex] + /// LDFF1B Zresult.D, Pg/Z, [Xbase, XZR] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V2, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_E LDFF1B {.B }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(Vector mask, byte* address) => LoadVectorByteZeroExtendFirstFaulting(mask, address); + + + /// LoadVectorByteZeroExtendToInt16 : Load 8-bit data and zero-extend + + /// + /// svint16_t svld1ub_s16(svbool_t pg, const uint8_t *base) + /// LD1B Zresult.H, Pg/Z, [Xarray, Xindex] + /// LD1B Zresult.H, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_E LD1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, 
REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_H LD1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorByteZeroExtendToInt16(Vector mask, byte* address) => LoadVectorByteZeroExtendToInt16(mask, address); + + + /// LoadVectorByteZeroExtendToInt32 : Load 8-bit data and zero-extend + + /// + /// svint32_t svld1ub_s32(svbool_t pg, const uint8_t *base) + /// LD1B Zresult.S, Pg/Z, [Xarray, Xindex] + /// LD1B Zresult.S, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_E LD1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_H LD1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorByteZeroExtendToInt32(Vector mask, byte* address) => LoadVectorByteZeroExtendToInt32(mask, address); + + + /// LoadVectorByteZeroExtendToInt64 : Load 8-bit data and zero-extend + + /// + /// svint64_t svld1ub_s64(svbool_t pg, const uint8_t *base) + /// LD1B Zresult.D, Pg/Z, [Xarray, Xindex] + /// LD1B 
Zresult.D, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_E LD1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_H LD1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorByteZeroExtendToInt64(Vector mask, byte* address) => LoadVectorByteZeroExtendToInt64(mask, address); + + + /// LoadVectorByteZeroExtendToUInt16 : Load 8-bit data and zero-extend + + /// + /// svuint16_t svld1ub_u16(svbool_t pg, const uint8_t *base) + /// LD1B Zresult.H, Pg/Z, [Xarray, Xindex] + /// LD1B Zresult.H, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_E LD1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// 
IF_SVE_HW_4B LD1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_H LD1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorByteZeroExtendToUInt16(Vector mask, byte* address) => LoadVectorByteZeroExtendToUInt16(mask, address); + + + /// LoadVectorByteZeroExtendToUInt32 : Load 8-bit data and zero-extend + + /// + /// svuint32_t svld1ub_u32(svbool_t pg, const uint8_t *base) + /// LD1B Zresult.S, Pg/Z, [Xarray, Xindex] + /// LD1B Zresult.S, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_E LD1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_H LD1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorByteZeroExtendToUInt32(Vector mask, byte* address) => LoadVectorByteZeroExtendToUInt32(mask, address); + + + /// LoadVectorByteZeroExtendToUInt64 : Load 8-bit data and zero-extend 
+ + /// + /// svuint64_t svld1ub_u64(svbool_t pg, const uint8_t *base) + /// LD1B Zresult.D, Pg/Z, [Xarray, Xindex] + /// LD1B Zresult.D, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_E LD1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P1, REG_R3, 7, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LD1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_H LD1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1b, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LD1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1b, EA_SCALABLE, REG_V0, REG_P0, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorByteZeroExtendToUInt64(Vector mask, byte* address) => LoadVectorByteZeroExtendToUInt64(mask, address); + + + /// LoadVectorFirstFaulting : Unextended load, first-faulting + + /// + /// svint8_t svldff1[_s8](svbool_t pg, const int8_t *base) + /// LDFF1B Zresult.B, Pg/Z, [Xarray, Xindex] + /// LDFF1B Zresult.B, Pg/Z, [Xbase, XZR] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V2, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_E LDFF1B {.B }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, 
EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, sbyte* address) => LoadVectorFirstFaulting(mask, address); + + /// + /// svint16_t svldff1[_s16](svbool_t pg, const int16_t *base) + /// LDFF1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LDFF1H Zresult.H, Pg/Z, [Xbase, XZR, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1H {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1H {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1H {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1H {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1H {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V0, REG_P2, REG_R6, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1H {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_G LDFF1H {.H }, /Z, [{, , LSL #1}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1H {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, short* address) => 
LoadVectorFirstFaulting(mask, address); + + /// + /// svint32_t svldff1[_s32](svbool_t pg, const int32_t *base) + /// LDFF1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2] + /// LDFF1W Zresult.S, Pg/Z, [Xbase, XZR, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, int* address) => LoadVectorFirstFaulting(mask, address); + + /// + /// svint64_t svldff1[_s64](svbool_t pg, const int64_t *base) + /// LDFF1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3] + /// LDFF1D Zresult.D, Pg/Z, [Xbase, XZR, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, 
REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1D {.D }, /Z, [{, , LSL #3}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V5, REG_P6, REG_R7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V2, REG_P6, REG_R5, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1d, EA_SCALABLE, REG_V7, REG_P3, REG_V1, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, long* address) => LoadVectorFirstFaulting(mask, address); + + /// + /// svuint8_t svldff1[_u8](svbool_t pg, const uint8_t *base) + /// LDFF1B Zresult.B, Pg/Z, [Xarray, Xindex] + /// LDFF1B Zresult.B, Pg/Z, [Xbase, XZR] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1B {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1B {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1B {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V2, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_E LDFF1B {.B }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1b, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_R1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1B {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1b, EA_SCALABLE, REG_V4, REG_P3, REG_V1, 5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, byte* address) => LoadVectorFirstFaulting(mask, address); + + /// + /// svuint16_t svldff1[_u16](svbool_t pg, const uint16_t *base) + /// LDFF1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LDFF1H Zresult.H, Pg/Z, [Xbase, XZR, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1H {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1H {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, 
EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1H {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1H {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1H {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V0, REG_P2, REG_R6, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1H {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_G LDFF1H {.H }, /Z, [{, , LSL #1}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1H {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, ushort* address) => LoadVectorFirstFaulting(mask, address); + + /// + /// svuint32_t svldff1[_u32](svbool_t pg, const uint32_t *base) + /// LDFF1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2] + /// LDFF1W Zresult.S, Pg/Z, [Xbase, XZR, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// 
IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, uint* address) => LoadVectorFirstFaulting(mask, address); + + /// + /// svuint64_t svldff1[_u64](svbool_t pg, const uint64_t *base) + /// LDFF1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3] + /// LDFF1D Zresult.D, Pg/Z, [Xbase, XZR, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1D {.D }, /Z, [{, , LSL #3}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V5, REG_P6, REG_R7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V2, REG_P6, REG_R5, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1D {.D }, /Z, [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1d, EA_SCALABLE, REG_V7, REG_P3, REG_V1, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, ulong* address) => LoadVectorFirstFaulting(mask, address); + + /// + /// svfloat32_t svldff1[_f32](svbool_t pg, const float32_t *base) + /// LDFF1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2] + /// LDFF1W Zresult.S, Pg/Z, [Xbase, XZR, LSL #2] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1W {.S }, /Z, [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1W {.D }, /Z, [, .D, #2] + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1W {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1W {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1W {.D }, /Z, [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1W {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_F LDFF1W {.S }, /Z, [{, , LSL #2}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1W {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, float* address) => LoadVectorFirstFaulting(mask, address); + + /// + /// svfloat64_t svldff1[_f64](svbool_t pg, const float64_t *base) + /// LDFF1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3] + /// LDFF1D Zresult.D, Pg/Z, [Xbase, XZR, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_IU_4A LDFF1D {.D }, /Z, [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_IU_4A_A LDFF1D {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V6, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_IG_4A LDFF1D {.D }, /Z, [{, , LSL #3}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B LDFF1D {.D }, /Z, [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V5, REG_P6, REG_R7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_IU_4B_B LDFF1D {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1d, EA_SCALABLE, REG_V2, REG_P6, REG_R5, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_IV_3A LDFF1D {.D }, /Z, 
[.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1d, EA_SCALABLE, REG_V7, REG_P3, REG_V1, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, double* address) => LoadVectorFirstFaulting(mask, address); + + + /// LoadVectorInt16NonFaultingSignExtendToInt32 : Load 16-bit data and sign-extend, non-faulting + + /// + /// svint32_t svldnf1sh_s32(svbool_t pg, const int16_t *base) + /// LDNF1SH Zresult.S, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IL_3A_A LDNF1SH {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sh, EA_SCALABLE, REG_V0, REG_P1, REG_R5, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sh, EA_SCALABLE, REG_V0, REG_P1, REG_R5, 5, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToInt32(short* address) => LoadVectorInt16NonFaultingSignExtendToInt32(address); + + + /// LoadVectorInt16NonFaultingSignExtendToInt64 : Load 16-bit data and sign-extend, non-faulting + + /// + /// svint64_t svldnf1sh_s64(svbool_t pg, const int16_t *base) + /// LDNF1SH Zresult.D, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IL_3A_A LDNF1SH {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sh, EA_SCALABLE, REG_V0, REG_P1, REG_R5, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sh, EA_SCALABLE, REG_V0, REG_P1, REG_R5, 5, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToInt64(short* address) => LoadVectorInt16NonFaultingSignExtendToInt64(address); + + + /// LoadVectorInt16NonFaultingSignExtendToUInt32 : Load 16-bit data and sign-extend, non-faulting + + /// + /// svuint32_t svldnf1sh_u32(svbool_t pg, const int16_t *base) + /// LDNF1SH Zresult.S, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IL_3A_A LDNF1SH {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sh, EA_SCALABLE, REG_V0, REG_P1, REG_R5, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sh, EA_SCALABLE, REG_V0, REG_P1, REG_R5, 5, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToUInt32(short* address) => LoadVectorInt16NonFaultingSignExtendToUInt32(address); + + + /// LoadVectorInt16NonFaultingSignExtendToUInt64 : Load 16-bit data and sign-extend, non-faulting + + /// + /// svuint64_t svldnf1sh_u64(svbool_t pg, const int16_t *base) + /// LDNF1SH Zresult.D, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IL_3A_A LDNF1SH {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sh, EA_SCALABLE, REG_V0, REG_P1, REG_R5, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sh, EA_SCALABLE, REG_V0, REG_P1, REG_R5, 5, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToUInt64(short* address) => LoadVectorInt16NonFaultingSignExtendToUInt64(address); + + + /// LoadVectorInt16SignExtendFirstFaulting : Load 16-bit data and sign-extend, first-faulting + + /// + /// svint32_t svldff1sh_s32(svbool_t pg, const int16_t *base) + /// LDFF1SH Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1] + /// LDFF1SH Zresult.S, Pg/Z, [Xbase, XZR, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SH {.S }, /Z, [, .S, #1] + 
+
+
+ /// LoadVectorInt16SignExtendFirstFaulting : Load 16-bit data and sign-extend, first-faulting
+
+ /// <summary>
+ /// svint32_t svldff1sh_s32(svbool_t pg, const int16_t *base)
+ ///   LDFF1SH Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]
+ ///   LDFF1SH Zresult.S, Pg/Z, [Xbase, XZR, LSL #1]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_HW_4A LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_A LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_B LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_HW_4A_C LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+ ///   IF_SVE_HW_4B LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HW_4B_D LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IG_4A_F LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #1}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HX_3A_E LDFF1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> LoadVectorInt16SignExtendFirstFaulting(Vector<int> mask, short* address) => LoadVectorInt16SignExtendFirstFaulting(mask, address);
+
+ /// <summary>
+ /// svint64_t svldff1sh_s64(svbool_t pg, const int16_t *base)
+ ///   LDFF1SH Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]
+ ///   LDFF1SH Zresult.D, Pg/Z, [Xbase, XZR, LSL #1]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_HW_4A LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_A LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_B LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_HW_4A_C LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+ ///   IF_SVE_HW_4B LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HW_4B_D LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IG_4A_F LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #1}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HX_3A_E LDFF1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorInt16SignExtendFirstFaulting(Vector<long> mask, short* address) => LoadVectorInt16SignExtendFirstFaulting(mask, address);
+
+ /// <summary>
+ /// svuint32_t svldff1sh_u32(svbool_t pg, const int16_t *base)
+ ///   LDFF1SH Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]
+ ///   LDFF1SH Zresult.S, Pg/Z, [Xbase, XZR, LSL #1]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_HW_4A LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_A LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_B LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_HW_4A_C LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+ ///   IF_SVE_HW_4B LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HW_4B_D LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IG_4A_F LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #1}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HX_3A_E LDFF1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<uint> LoadVectorInt16SignExtendFirstFaulting(Vector<uint> mask, short* address) => LoadVectorInt16SignExtendFirstFaulting(mask, address);
+
+ /// <summary>
+ /// svuint64_t svldff1sh_u64(svbool_t pg, const int16_t *base)
+ ///   LDFF1SH Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]
+ ///   LDFF1SH Zresult.D, Pg/Z, [Xbase, XZR, LSL #1]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_HW_4A LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_V5, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_A LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_B LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_HW_4A_C LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+ ///   IF_SVE_HW_4B LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HW_4B_D LDFF1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IG_4A_F LDFF1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #1}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sh, EA_SCALABLE, REG_V4, REG_P3, REG_R1, REG_R2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HX_3A_E LDFF1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sh, EA_SCALABLE, REG_V3, REG_P5, REG_V4, 62, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorInt16SignExtendFirstFaulting(Vector<ulong> mask, short* address) => LoadVectorInt16SignExtendFirstFaulting(mask, address);
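+
+ // Editor's sketch (illustrative, not part of the generated surface): a first-faulting
+ // load only guarantees the first active element; the FFR records where the load
+ // stopped, so a caller can resume from the faulting element instead of crashing.
+ // SetFfr, CreateTrueMaskInt32 and GetFfrInt32 are assumed helper names.
+ private static unsafe Vector<int> LoadChunkFirstFaulting(Vector<int> mask, short* address, out Vector<int> survivors)
+ {
+     SetFfr(CreateTrueMaskInt32());                                            // prime the FFR before the load
+     Vector<int> data = LoadVectorInt16SignExtendFirstFaulting(mask, address); // LDFF1SH
+     survivors = GetFfrInt32();                                                // lanes that actually loaded
+     return data;
+ }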
+
+
+ /// LoadVectorInt16SignExtendToInt32 : Load 16-bit data and sign-extend
+
+ /// <summary>
+ /// svint32_t svld1sh_s32(svbool_t pg, const int16_t *base)
+ ///   LD1SH Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]
+ ///   LD1SH Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IJ_3A_F LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_HW_4A LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_A LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_B LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_HW_4A_C LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW);
+ ///   IF_SVE_HW_4B LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HW_4B_D LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IK_4A_G LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HX_3A_E LD1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> LoadVectorInt16SignExtendToInt32(Vector<int> mask, short* address) => LoadVectorInt16SignExtendToInt32(mask, address);
+
+
+ /// LoadVectorInt16SignExtendToInt64 : Load 16-bit data and sign-extend
+
+ /// <summary>
+ /// svint64_t svld1sh_s64(svbool_t pg, const int16_t *base)
+ ///   LD1SH Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]
+ ///   LD1SH Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IJ_3A_F LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_HW_4A LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_A LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_B LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_HW_4A_C LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW);
+ ///   IF_SVE_HW_4B LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HW_4B_D LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IK_4A_G LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HX_3A_E LD1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorInt16SignExtendToInt64(Vector<long> mask, short* address) => LoadVectorInt16SignExtendToInt64(mask, address);
+
+
+ /// LoadVectorInt16SignExtendToUInt32 : Load 16-bit data and sign-extend
+
+ /// <summary>
+ /// svuint32_t svld1sh_u32(svbool_t pg, const int16_t *base)
+ ///   LD1SH Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]
+ ///   LD1SH Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IJ_3A_F LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_HW_4A LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_A LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_B LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_HW_4A_C LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW);
+ ///   IF_SVE_HW_4B LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HW_4B_D LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IK_4A_G LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HX_3A_E LD1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<uint> LoadVectorInt16SignExtendToUInt32(Vector<uint> mask, short* address) => LoadVectorInt16SignExtendToUInt32(mask, address);
+
+
+ /// LoadVectorInt16SignExtendToUInt64 : Load 16-bit data and sign-extend
+
+ /// <summary>
+ /// svuint64_t svld1sh_u64(svbool_t pg, const int16_t *base)
+ ///   LD1SH Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]
+ ///   LD1SH Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IJ_3A_F LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V7, REG_P3, REG_R5, 2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_HW_4A LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_A LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_HW_4A_B LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V3, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_HW_4A_C LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW);
+ ///   IF_SVE_HW_4B LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HW_4B_D LD1SH {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IK_4A_G LD1SH {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_HX_3A_E LD1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sh, EA_SCALABLE, REG_V2, REG_P4, REG_V3, 2, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorInt16SignExtendToUInt64(Vector<ulong> mask, short* address) => LoadVectorInt16SignExtendToUInt64(mask, address);
+
+
+ /// LoadVectorInt32NonFaultingSignExtendToInt64 : Load 32-bit data and sign-extend, non-faulting
+
+ /// <summary>
+ /// svint64_t svldnf1sw_s64(svbool_t pg, const int32_t *base)
+ ///   LDNF1SW Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A LDNF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sw, EA_SCALABLE, REG_V0, REG_P0, REG_R0, 0, INS_OPTS_SCALABLE_D);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorInt32NonFaultingSignExtendToInt64(int* address) => LoadVectorInt32NonFaultingSignExtendToInt64(address);
+
+
+ /// LoadVectorInt32NonFaultingSignExtendToUInt64 : Load 32-bit data and sign-extend, non-faulting
+
+ /// <summary>
+ /// svuint64_t svldnf1sw_u64(svbool_t pg, const int32_t *base)
+ ///   LDNF1SW Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A LDNF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sw, EA_SCALABLE, REG_V0, REG_P0, REG_R0, 0, INS_OPTS_SCALABLE_D);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorInt32NonFaultingSignExtendToUInt64(int* address) => LoadVectorInt32NonFaultingSignExtendToUInt64(address);
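+
+ // Editor's sketch (illustrative, not part of the generated surface): the extending
+ // loads fold the load and the widen into a single LD1SH, so summing shorts into
+ // 32-bit lanes needs no separate conversion step. CreateWhileLessThanMask32Bit is
+ // an assumed predicate helper; inactive lanes load as zero, so the add is safe.
+ private static unsafe Vector<int> SumInt16Widened(short* address, int length)
+ {
+     Vector<int> total = Vector<int>.Zero;
+     for (int i = 0; i < length; i += Vector<int>.Count)
+     {
+         Vector<int> mask = CreateWhileLessThanMask32Bit(i, length);   // assumed helper
+         total += LoadVectorInt16SignExtendToInt32(mask, address + i); // one LD1SH per iteration
+     }
+     return total; // per-lane partial sums; reduce afterwards
+ }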
+
+
+ /// LoadVectorInt32SignExtendFirstFaulting : Load 32-bit data and sign-extend, first-faulting
+
+ /// <summary>
+ /// svint64_t svldff1sw_s64(svbool_t pg, const int32_t *base)
+ ///   LDFF1SW Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]
+ ///   LDFF1SW Zresult.D, Pg/Z, [Xbase, XZR, LSL #2]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IU_4A LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_IU_4A_A LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_IG_4A LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #2}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_IU_4B LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_IU_4B_B LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IV_3A LDFF1SW {<Zt>.D }, <Pg>/Z, [<Zn>.D{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorInt32SignExtendFirstFaulting(Vector<long> mask, int* address) => LoadVectorInt32SignExtendFirstFaulting(mask, address);
+
+ /// <summary>
+ /// svuint64_t svldff1sw_u64(svbool_t pg, const int32_t *base)
+ ///   LDFF1SW Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]
+ ///   LDFF1SW Zresult.D, Pg/Z, [Xbase, XZR, LSL #2]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IU_4A LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_IU_4A_A LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V0, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_IG_4A LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #2}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_IU_4B LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P0, REG_R10, REG_V9, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_IU_4B_B LDFF1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sw, EA_SCALABLE, REG_V3, REG_P4, REG_R6, REG_V5, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IV_3A LDFF1SW {<Zt>.D }, <Pg>/Z, [<Zn>.D{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sw, EA_SCALABLE, REG_V2, REG_P0, REG_V4, 124, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorInt32SignExtendFirstFaulting(Vector<ulong> mask, int* address) => LoadVectorInt32SignExtendFirstFaulting(mask, address);
+
+
+ /// LoadVectorInt32SignExtendToInt64 : Load 32-bit data and sign-extend
+
+ /// <summary>
+ /// svint64_t svld1sw_s64(svbool_t pg, const int32_t *base)
+ ///   LD1SW Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]
+ ///   LD1SW Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IJ_3A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IU_4A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_IU_4A_A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_IK_4A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #2]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_IU_4B LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_IU_4B_B LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IV_3A LD1SW {<Zt>.D }, <Pg>/Z, [<Zn>.D{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorInt32SignExtendToInt64(Vector<long> mask, int* address) => LoadVectorInt32SignExtendToInt64(mask, address);
+
+
+ /// LoadVectorInt32SignExtendToUInt64 : Load 32-bit data and sign-extend
+
+ /// <summary>
+ /// svuint64_t svld1sw_u64(svbool_t pg, const int32_t *base)
+ ///   LD1SW Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]
+ ///   LD1SW Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IJ_3A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P5, REG_R3, 4, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IU_4A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ ///   IF_SVE_IU_4A_A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_IK_4A LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #2]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_IU_4B LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_IU_4B_B LD1SW {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ld1sw, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V0, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IV_3A LD1SW {<Zt>.D }, <Pg>/Z, [<Zn>.D{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ld1sw, EA_SCALABLE, REG_V6, REG_P5, REG_V4, 0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorInt32SignExtendToUInt64(Vector<ulong> mask, int* address) => LoadVectorInt32SignExtendToUInt64(mask, address);
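+
+ // Editor's sketch (illustrative, not part of the generated surface): widening on the
+ // way in via LD1SW is the natural way to accumulate 32-bit values into 64-bit lanes
+ // without intermediate overflow. CreateWhileLessThanMask64Bit is an assumed helper.
+ private static unsafe Vector<long> SumInt32Widened(int* address, int length)
+ {
+     Vector<long> total = Vector<long>.Zero;
+     for (int i = 0; i < length; i += Vector<long>.Count)
+     {
+         Vector<long> mask = CreateWhileLessThanMask64Bit(i, length);  // assumed helper
+         total += LoadVectorInt32SignExtendToInt64(mask, address + i); // one LD1SW per iteration
+     }
+     return total;
+ }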
+
+
+ /// LoadVectorNonFaulting : Unextended load, non-faulting
+
+ /// <summary>
+ /// svint8_t svldnf1[_s8](svbool_t pg, const int8_t *base)
+ ///   LDNF1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A_C LDNF1B {<Zt>.B }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -4, INS_OPTS_SCALABLE_B);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -2, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 2, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 1, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<sbyte> LoadVectorNonFaulting(sbyte* address) => LoadVectorNonFaulting(address);
+
+ /// <summary>
+ /// svint16_t svldnf1[_s16](svbool_t pg, const int16_t *base)
+ ///   LDNF1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A_B LDNF1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<short> LoadVectorNonFaulting(short* address) => LoadVectorNonFaulting(address);
+
+ /// <summary>
+ /// svint32_t svldnf1[_s32](svbool_t pg, const int32_t *base)
+ ///   LDNF1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A_A LDNF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1w, EA_SCALABLE, REG_V0, REG_P2, REG_R4, 5, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1w, EA_SCALABLE, REG_V0, REG_P2, REG_R4, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<int> LoadVectorNonFaulting(int* address) => LoadVectorNonFaulting(address);
+
+ /// <summary>
+ /// svint64_t svldnf1[_s64](svbool_t pg, const int64_t *base)
+ ///   LDNF1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A LDNF1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1d, EA_SCALABLE, REG_V0, REG_P0, REG_R0, 0, INS_OPTS_SCALABLE_D);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorNonFaulting(long* address) => LoadVectorNonFaulting(address);
+
+ /// <summary>
+ /// svuint8_t svldnf1[_u8](svbool_t pg, const uint8_t *base)
+ ///   LDNF1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A_C LDNF1B {<Zt>.B }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -4, INS_OPTS_SCALABLE_B);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, -2, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 2, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1b, EA_SCALABLE, REG_V2, REG_P5, REG_R3, 1, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<byte> LoadVectorNonFaulting(byte* address) => LoadVectorNonFaulting(address);
+
+ /// <summary>
+ /// svuint16_t svldnf1[_u16](svbool_t pg, const uint16_t *base)
+ ///   LDNF1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A_B LDNF1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ushort> LoadVectorNonFaulting(ushort* address) => LoadVectorNonFaulting(address);
+
+ /// <summary>
+ /// svuint32_t svldnf1[_u32](svbool_t pg, const uint32_t *base)
+ ///   LDNF1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A_A LDNF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1w, EA_SCALABLE, REG_V0, REG_P2, REG_R4, 5, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1w, EA_SCALABLE, REG_V0, REG_P2, REG_R4, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<uint> LoadVectorNonFaulting(uint* address) => LoadVectorNonFaulting(address);
+
+ /// <summary>
+ /// svuint64_t svldnf1[_u64](svbool_t pg, const uint64_t *base)
+ ///   LDNF1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A LDNF1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1d, EA_SCALABLE, REG_V0, REG_P0, REG_R0, 0, INS_OPTS_SCALABLE_D);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorNonFaulting(ulong* address) => LoadVectorNonFaulting(address);
+
+ /// <summary>
+ /// svfloat32_t svldnf1[_f32](svbool_t pg, const float32_t *base)
+ ///   LDNF1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A_A LDNF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1w, EA_SCALABLE, REG_V0, REG_P2, REG_R4, 5, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1w, EA_SCALABLE, REG_V0, REG_P2, REG_R4, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<float> LoadVectorNonFaulting(float* address) => LoadVectorNonFaulting(address);
+
+ /// <summary>
+ /// svfloat64_t svldnf1[_f64](svbool_t pg, const float64_t *base)
+ ///   LDNF1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A LDNF1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1d, EA_SCALABLE, REG_V0, REG_P0, REG_R0, 0, INS_OPTS_SCALABLE_D);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<double> LoadVectorNonFaulting(double* address) => LoadVectorNonFaulting(address);
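+
+ // Editor's note (illustrative, not part of the generated surface): unlike LoadVector,
+ // the non-faulting entry points take no mask argument - the "Embedded arg1 mask
+ // predicate" remark above means the JIT supplies an all-true governing predicate
+ // itself. A full-vector probe that can safely touch an unmapped page is therefore:
+ private static unsafe Vector<double> ProbeDoubles(double* address)
+ {
+     // LDNF1D: lanes that cannot be read are zeroed and flagged in the FFR, not trapped on.
+     return LoadVectorNonFaulting(address);
+ }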
+
+
+ /// LoadVectorNonTemporal : Unextended load, non-temporal
+
+ /// <summary>
+ /// svint8_t svldnt1[_s8](svbool_t pg, const int8_t *base)
+ ///   LDNT1B Zresult.B, Pg/Z, [Xarray, Xindex]
+ ///   LDNT1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IM_3A LDNT1B {<Zt>.B }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -5, INS_OPTS_SCALABLE_B);
+ ///   IF_SVE_IF_4A LDNT1B {<Zt>.S }, <Pg>/Z, [<Zn>.S{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V3, REG_P2, REG_V1, REG_R0, INS_OPTS_SCALABLE_S);
+ ///   IF_SVE_IF_4A_A LDNT1B {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V4, REG_R3, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IN_4A LDNT1B {<Zt>.B }, <Pg>/Z, [<Xn|SP>, <Xm>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<sbyte> LoadVectorNonTemporal(Vector<sbyte> mask, sbyte* address) => LoadVectorNonTemporal(mask, address);
+
+ /// <summary>
+ /// svint16_t svldnt1[_s16](svbool_t pg, const int16_t *base)
+ ///   LDNT1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]
+ ///   LDNT1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IM_3A LDNT1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1h, EA_SCALABLE, REG_V6, REG_P7, REG_R8, 0, INS_OPTS_SCALABLE_H);
+ ///   IF_SVE_IF_4A LDNT1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_V2, REG_R3, INS_OPTS_SCALABLE_S);
+ ///   IF_SVE_IF_4A_A LDNT1H {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V1, REG_P4, REG_V3, REG_R2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IN_4A LDNT1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe Vector<short> LoadVectorNonTemporal(Vector<short> mask, short* address) => LoadVectorNonTemporal(mask, address);
+
+ /// <summary>
+ /// svint32_t svldnt1[_s32](svbool_t pg, const int32_t *base)
+ ///   LDNT1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]
+ ///   LDNT1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IM_3A LDNT1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S);
+ ///   IF_SVE_IF_4A LDNT1W {<Zt>.S }, <Pg>/Z, [<Zn>.S{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S);
+ ///   IF_SVE_IF_4A_A LDNT1W {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IN_4A LDNT1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #2]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe Vector<int> LoadVectorNonTemporal(Vector<int> mask, int* address) => LoadVectorNonTemporal(mask, address);
+
+ /// <summary>
+ /// svint64_t svldnt1[_s64](svbool_t pg, const int64_t *base)
+ ///   LDNT1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]
+ ///   LDNT1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IM_3A LDNT1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1d, EA_SCALABLE, REG_V3, REG_P4, REG_R5, -1, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IN_4A LDNT1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #3]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_IX_4A LDNT1D {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1d, EA_SCALABLE, REG_V4, REG_P2, REG_V1, REG_R3, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorNonTemporal(Vector<long> mask, long* address) => LoadVectorNonTemporal(mask, address);
+
+ /// <summary>
+ /// svuint8_t svldnt1[_u8](svbool_t pg, const uint8_t *base)
+ ///   LDNT1B Zresult.B, Pg/Z, [Xarray, Xindex]
+ ///   LDNT1B Zresult.B, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IM_3A LDNT1B {<Zt>.B }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -5, INS_OPTS_SCALABLE_B);
+ ///   IF_SVE_IF_4A LDNT1B {<Zt>.S }, <Pg>/Z, [<Zn>.S{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V3, REG_P2, REG_V1, REG_R0, INS_OPTS_SCALABLE_S);
+ ///   IF_SVE_IF_4A_A LDNT1B {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V4, REG_R3, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IN_4A LDNT1B {<Zt>.B }, <Pg>/Z, [<Xn|SP>, <Xm>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<byte> LoadVectorNonTemporal(Vector<byte> mask, byte* address) => LoadVectorNonTemporal(mask, address);
+
+ /// <summary>
+ /// svuint16_t svldnt1[_u16](svbool_t pg, const uint16_t *base)
+ ///   LDNT1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1]
+ ///   LDNT1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IM_3A LDNT1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1h, EA_SCALABLE, REG_V6, REG_P7, REG_R8, 0, INS_OPTS_SCALABLE_H);
+ ///   IF_SVE_IF_4A LDNT1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_V2, REG_R3, INS_OPTS_SCALABLE_S);
+ ///   IF_SVE_IF_4A_A LDNT1H {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V1, REG_P4, REG_V3, REG_R2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IN_4A LDNT1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe Vector<ushort> LoadVectorNonTemporal(Vector<ushort> mask, ushort* address) => LoadVectorNonTemporal(mask, address);
+
+ /// <summary>
+ /// svuint32_t svldnt1[_u32](svbool_t pg, const uint32_t *base)
+ ///   LDNT1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]
+ ///   LDNT1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IM_3A LDNT1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S);
+ ///   IF_SVE_IF_4A LDNT1W {<Zt>.S }, <Pg>/Z, [<Zn>.S{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S);
+ ///   IF_SVE_IF_4A_A LDNT1W {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IN_4A LDNT1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #2]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe Vector<uint> LoadVectorNonTemporal(Vector<uint> mask, uint* address) => LoadVectorNonTemporal(mask, address);
+
+ /// <summary>
+ /// svuint64_t svldnt1[_u64](svbool_t pg, const uint64_t *base)
+ ///   LDNT1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]
+ ///   LDNT1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IM_3A LDNT1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1d, EA_SCALABLE, REG_V3, REG_P4, REG_R5, -1, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IN_4A LDNT1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #3]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_IX_4A LDNT1D {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1d, EA_SCALABLE, REG_V4, REG_P2, REG_V1, REG_R3, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorNonTemporal(Vector<ulong> mask, ulong* address) => LoadVectorNonTemporal(mask, address);
+
+ /// <summary>
+ /// svfloat32_t svldnt1[_f32](svbool_t pg, const float32_t *base)
+ ///   LDNT1W Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2]
+ ///   LDNT1W Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IM_3A LDNT1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S);
+ ///   IF_SVE_IF_4A LDNT1W {<Zt>.S }, <Pg>/Z, [<Zn>.S{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S);
+ ///   IF_SVE_IF_4A_A LDNT1W {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IN_4A LDNT1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #2]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe Vector<float> LoadVectorNonTemporal(Vector<float> mask, float* address) => LoadVectorNonTemporal(mask, address);
+
+ /// <summary>
+ /// svfloat64_t svldnt1[_f64](svbool_t pg, const float64_t *base)
+ ///   LDNT1D Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]
+ ///   LDNT1D Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IM_3A LDNT1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1d, EA_SCALABLE, REG_V3, REG_P4, REG_R5, -1, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IN_4A LDNT1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #3]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///   IF_SVE_IX_4A LDNT1D {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1d, EA_SCALABLE, REG_V4, REG_P2, REG_V1, REG_R3, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<double> LoadVectorNonTemporal(Vector<double> mask, double* address) => LoadVectorNonTemporal(mask, address);
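+
+ // Editor's sketch (illustrative, not part of the generated surface): LDNT1 hints that
+ // the data will not be reused, so a one-pass copy of a large buffer avoids evicting
+ // the working set from cache. CreateTrueMaskDouble and StoreNonTemporal are assumed
+ // names, and length is assumed to be a multiple of the vector size.
+ private static unsafe void StreamCopy(double* source, double* destination, int length)
+ {
+     Vector<double> all = CreateTrueMaskDouble();                       // assumed helper
+     for (int i = 0; i < length; i += Vector<double>.Count)
+     {
+         Vector<double> block = LoadVectorNonTemporal(all, source + i); // LDNT1D
+         StoreNonTemporal(all, destination + i, block);                 // assumed store counterpart
+     }
+ }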
+
+
+ /// LoadVectorSByteNonFaultingSignExtendToInt16 : Load 8-bit data and sign-extend, non-faulting
+
+ /// <summary>
+ /// svint16_t svldnf1sb_s16(svbool_t pg, const int8_t *base)
+ ///   LDNF1SB Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A_B LDNF1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<short> LoadVectorSByteNonFaultingSignExtendToInt16(sbyte* address) => LoadVectorSByteNonFaultingSignExtendToInt16(address);
+
+
+ /// LoadVectorSByteNonFaultingSignExtendToInt32 : Load 8-bit data and sign-extend, non-faulting
+
+ /// <summary>
+ /// svint32_t svldnf1sb_s32(svbool_t pg, const int8_t *base)
+ ///   LDNF1SB Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A_B LDNF1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<int> LoadVectorSByteNonFaultingSignExtendToInt32(sbyte* address) => LoadVectorSByteNonFaultingSignExtendToInt32(address);
+
+
+ /// LoadVectorSByteNonFaultingSignExtendToInt64 : Load 8-bit data and sign-extend, non-faulting
+
+ /// <summary>
+ /// svint64_t svldnf1sb_s64(svbool_t pg, const int8_t *base)
+ ///   LDNF1SB Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A_B LDNF1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorSByteNonFaultingSignExtendToInt64(sbyte* address) => LoadVectorSByteNonFaultingSignExtendToInt64(address);
+
+
+ /// LoadVectorSByteNonFaultingSignExtendToUInt16 : Load 8-bit data and sign-extend, non-faulting
+
+ /// <summary>
+ /// svuint16_t svldnf1sb_u16(svbool_t pg, const int8_t *base)
+ ///   LDNF1SB Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A_B LDNF1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ushort> LoadVectorSByteNonFaultingSignExtendToUInt16(sbyte* address) => LoadVectorSByteNonFaultingSignExtendToUInt16(address);
+
+
+ /// LoadVectorSByteNonFaultingSignExtendToUInt32 : Load 8-bit data and sign-extend, non-faulting
+
+ /// <summary>
+ /// svuint32_t svldnf1sb_u32(svbool_t pg, const int8_t *base)
+ ///   LDNF1SB Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A_B LDNF1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<uint> LoadVectorSByteNonFaultingSignExtendToUInt32(sbyte* address) => LoadVectorSByteNonFaultingSignExtendToUInt32(address);
+
+
+ /// LoadVectorSByteNonFaultingSignExtendToUInt64 : Load 8-bit data and sign-extend, non-faulting
+
+ /// <summary>
+ /// svuint64_t svldnf1sb_u64(svbool_t pg, const int8_t *base)
+ ///   LDNF1SB Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_IL_3A_B LDNF1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1sb, EA_SCALABLE, REG_V0, REG_P4, REG_R1, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorSByteNonFaultingSignExtendToUInt64(sbyte* address) => LoadVectorSByteNonFaultingSignExtendToUInt64(address);
+
+
+ /// LoadVectorSByteSignExtendFirstFaulting : Load 8-bit data and sign-extend, first-faulting
+
+ /// <summary>
+ /// svint16_t svldff1sb_s16(svbool_t pg, const int8_t *base)
+ ///   LDFF1SB Zresult.H, Pg/Z, [Xarray, Xindex]
+ ///   LDFF1SB Zresult.H, Pg/Z, [Xbase, XZR]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_HW_4A LDFF1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_HW_4A_A LDFF1SB {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ ///   IF_SVE_HW_4B LDFF1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IG_4A_D LDFF1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_HX_3A_B LDFF1SB {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<short> LoadVectorSByteSignExtendFirstFaulting(Vector<short> mask, sbyte* address) => LoadVectorSByteSignExtendFirstFaulting(mask, address);
+
+ /// <summary>
+ /// svint32_t svldff1sb_s32(svbool_t pg, const int8_t *base)
+ ///   LDFF1SB Zresult.S, Pg/Z, [Xarray, Xindex]
+ ///   LDFF1SB Zresult.S, Pg/Z, [Xbase, XZR]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_HW_4A LDFF1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_HW_4A_A LDFF1SB {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ ///   IF_SVE_HW_4B LDFF1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IG_4A_D LDFF1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_HX_3A_B LDFF1SB {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> LoadVectorSByteSignExtendFirstFaulting(Vector<int> mask, sbyte* address) => LoadVectorSByteSignExtendFirstFaulting(mask, address);
+
+ /// <summary>
+ /// svint64_t svldff1sb_s64(svbool_t pg, const int8_t *base)
+ ///   LDFF1SB Zresult.D, Pg/Z, [Xarray, Xindex]
+ ///   LDFF1SB Zresult.D, Pg/Z, [Xbase, XZR]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_HW_4A LDFF1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_HW_4A_A LDFF1SB {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ ///   IF_SVE_HW_4B LDFF1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IG_4A_D LDFF1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_HX_3A_B LDFF1SB {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorSByteSignExtendFirstFaulting(Vector<long> mask, sbyte* address) => LoadVectorSByteSignExtendFirstFaulting(mask, address);
+
+ /// <summary>
+ /// svuint16_t svldff1sb_u16(svbool_t pg, const int8_t *base)
+ ///   LDFF1SB Zresult.H, Pg/Z, [Xarray, Xindex]
+ ///   LDFF1SB Zresult.H, Pg/Z, [Xbase, XZR]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_HW_4A LDFF1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_HW_4A_A LDFF1SB {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ ///   IF_SVE_HW_4B LDFF1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IG_4A_D LDFF1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_HX_3A_B LDFF1SB {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ushort> LoadVectorSByteSignExtendFirstFaulting(Vector<ushort> mask, sbyte* address) => LoadVectorSByteSignExtendFirstFaulting(mask, address);
+
+ /// <summary>
+ /// svuint32_t svldff1sb_u32(svbool_t pg, const int8_t *base)
+ ///   LDFF1SB Zresult.S, Pg/Z, [Xarray, Xindex]
+ ///   LDFF1SB Zresult.S, Pg/Z, [Xbase, XZR]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_HW_4A LDFF1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_HW_4A_A LDFF1SB {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ ///   IF_SVE_HW_4B LDFF1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IG_4A_D LDFF1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_HX_3A_B LDFF1SB {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<uint> LoadVectorSByteSignExtendFirstFaulting(Vector<uint> mask, sbyte* address) => LoadVectorSByteSignExtendFirstFaulting(mask, address);
+
+ /// <summary>
+ /// svuint64_t svldff1sb_u64(svbool_t pg, const int8_t *base)
+ ///   LDFF1SB Zresult.D, Pg/Z, [Xarray, Xindex]
+ ///   LDFF1SB Zresult.D, Pg/Z, [Xbase, XZR]
+ ///
+ /// codegenarm64test:
+ ///   IF_SVE_HW_4A LDFF1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW);
+ ///   IF_SVE_HW_4A_A LDFF1SB {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ ///   IF_SVE_HW_4B LDFF1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_IG_4A_D LDFF1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, <Xm>}]
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D);
+ ///   IF_SVE_HX_3A_B LDFF1SB {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S);
+ ///     theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorSByteSignExtendFirstFaulting(Vector<ulong> mask, sbyte* address) => LoadVectorSByteSignExtendFirstFaulting(mask, address);
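+
+ // Editor's sketch (illustrative, not part of the generated surface): the classic use
+ // of a first-faulting byte load is scanning a NUL-terminated string of unknown length,
+ // where running past the terminator into an unmapped page must not trap. SetFfr,
+ // CreateTrueMaskInt16 and GetFfrInt16 are assumed helper names.
+ private static unsafe Vector<short> ScanStringChunk(sbyte* s, long offset, out Vector<short> valid)
+ {
+     SetFfr(CreateTrueMaskInt16());                     // prime the FFR before the first-faulting load
+     Vector<short> chunk = LoadVectorSByteSignExtendFirstFaulting(CreateTrueMaskInt16(), s + offset);
+     valid = GetFfrInt16();                             // lanes that actually loaded
+     return chunk;                                      // caller searches the valid lanes for the 0 terminator
+ }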
theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_D LDFF1SB {.H }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address) => LoadVectorSByteSignExtendFirstFaulting(mask, address); + + /// + /// svuint64_t svldff1sb_u64(svbool_t pg, const int8_t *base) + /// LDFF1SB Zresult.D, Pg/Z, [Xarray, Xindex] + /// LDFF1SB Zresult.D, Pg/Z, [Xbase, XZR] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1SB {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_A LDFF1SB {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P1, REG_R4, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1SB {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_D LDFF1SB {.H }, /Z, [{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1sb, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R4, INS_OPTS_SCALABLE_D); + /// IF_SVE_HX_3A_B LDFF1SB {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1sb, EA_SCALABLE, REG_V2, REG_P6, REG_V0, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address) => LoadVectorSByteSignExtendFirstFaulting(mask, address); + + + /// LoadVectorSByteSignExtendToInt16 : Load 8-bit data and sign-extend + + /// + /// svint16_t svld1sb_s16(svbool_t pg, const int8_t *base) + /// LD1SB Zresult.H, Pg/Z, [Xarray, Xindex] + /// LD1SB Zresult.H, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_D LD1SB {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, 
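+
+ // --- Editor's illustrative sketch (not generator output) ---
+ // A first-faulting load only faults on the first active element; later lanes record
+ // success or failure in the FFR. A caller typically resets the FFR, loads, then reads
+ // the FFR back to learn which lanes are valid. SetFfr, GetFfrInt32, CreateTrueMaskInt32
+ // and ConditionalSelect are assumed to be declared elsewhere in this API surface.
+ private static unsafe Vector<int> LoadSByteExtendTrackingFaults(Vector<int> mask, sbyte* address)
+ {
+     SetFfr(CreateTrueMaskInt32());                 // assumed: resets the first-fault register
+     Vector<int> data = LoadVectorSByteSignExtendFirstFaulting(mask, address);
+     Vector<int> valid = GetFfrInt32();             // assumed: lanes that loaded without faulting
+     return ConditionalSelect(valid, data, Vector<int>.Zero);
+ }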
+
+
+ /// LoadVectorSByteSignExtendToInt16 : Load 8-bit data and sign-extend
+
+ /// <summary>
+ /// svint16_t svld1sb_s16(svbool_t pg, const int8_t *base)
+ /// LD1SB Zresult.H, Pg/Z, [Xarray, Xindex]
+ /// LD1SB Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A_D LD1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HW_4A LD1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_A LD1SB {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IK_4A_F LD1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HX_3A_B LD1SB {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<short> LoadVectorSByteSignExtendToInt16(Vector<short> mask, sbyte* address) => LoadVectorSByteSignExtendToInt16(mask, address);
+
+
+ /// LoadVectorSByteSignExtendToInt32 : Load 8-bit data and sign-extend
+
+ /// <summary>
+ /// svint32_t svld1sb_s32(svbool_t pg, const int8_t *base)
+ /// LD1SB Zresult.S, Pg/Z, [Xarray, Xindex]
+ /// LD1SB Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A_D LD1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HW_4A LD1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_A LD1SB {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IK_4A_F LD1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HX_3A_B LD1SB {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> LoadVectorSByteSignExtendToInt32(Vector<int> mask, sbyte* address) => LoadVectorSByteSignExtendToInt32(mask, address);
+
+
+ /// LoadVectorSByteSignExtendToInt64 : Load 8-bit data and sign-extend
+
+ /// <summary>
+ /// svint64_t svld1sb_s64(svbool_t pg, const int8_t *base)
+ /// LD1SB Zresult.D, Pg/Z, [Xarray, Xindex]
+ /// LD1SB Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A_D LD1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HW_4A LD1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_A LD1SB {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IK_4A_F LD1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HX_3A_B LD1SB {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorSByteSignExtendToInt64(Vector<long> mask, sbyte* address) => LoadVectorSByteSignExtendToInt64(mask, address);
+
+
+ /// LoadVectorSByteSignExtendToUInt16 : Load 8-bit data and sign-extend
+
+ /// <summary>
+ /// svuint16_t svld1sb_u16(svbool_t pg, const int8_t *base)
+ /// LD1SB Zresult.H, Pg/Z, [Xarray, Xindex]
+ /// LD1SB Zresult.H, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A_D LD1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HW_4A LD1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_A LD1SB {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IK_4A_F LD1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HX_3A_B LD1SB {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ushort> LoadVectorSByteSignExtendToUInt16(Vector<ushort> mask, sbyte* address) => LoadVectorSByteSignExtendToUInt16(mask, address);
+
+
+ /// LoadVectorSByteSignExtendToUInt32 : Load 8-bit data and sign-extend
+
+ /// <summary>
+ /// svuint32_t svld1sb_u32(svbool_t pg, const int8_t *base)
+ /// LD1SB Zresult.S, Pg/Z, [Xarray, Xindex]
+ /// LD1SB Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A_D LD1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HW_4A LD1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_A LD1SB {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IK_4A_F LD1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HX_3A_B LD1SB {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<uint> LoadVectorSByteSignExtendToUInt32(Vector<uint> mask, sbyte* address) => LoadVectorSByteSignExtendToUInt32(mask, address);
+
+
+ /// LoadVectorSByteSignExtendToUInt64 : Load 8-bit data and sign-extend
+
+ /// <summary>
+ /// svuint64_t svld1sb_u64(svbool_t pg, const int8_t *base)
+ /// LD1SB Zresult.D, Pg/Z, [Xarray, Xindex]
+ /// LD1SB Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A_D LD1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P0, REG_R2, 6, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HW_4A LD1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P0, REG_R1, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_A LD1SB {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V1, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1SB {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V6, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IK_4A_F LD1SB {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1sb, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HX_3A_B LD1SB {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1sb, EA_SCALABLE, REG_V2, REG_P7, REG_V3, 5, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorSByteSignExtendToUInt64(Vector<ulong> mask, sbyte* address) => LoadVectorSByteSignExtendToUInt64(mask, address);
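+
+ // --- Editor's illustrative sketch (not generator output) ---
+ // The mask's element type picks the overload: with a Vector<int> mask each signed byte
+ // widens to a 32-bit lane, so one load consumes Vector<int>.Count source bytes. Store is
+ // the plain masked store declared elsewhere in this file.
+ private static unsafe void WidenSBytesToInt32(Vector<int> mask, sbyte* src, int* dst, int count)
+ {
+     for (int i = 0; i + Vector<int>.Count <= count; i += Vector<int>.Count)
+     {
+         Vector<int> wide = LoadVectorSByteSignExtendToInt32(mask, src + i);
+         Store(mask, dst + i, wide);
+     }
+ }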
+
+
+ /// LoadVectorUInt16NonFaultingZeroExtendToInt32 : Load 16-bit data and zero-extend, non-faulting
+
+ /// <summary>
+ /// svint32_t svldnf1uh_s32(svbool_t pg, const uint16_t *base)
+ /// LDNF1H Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IL_3A_B LDNF1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<int> LoadVectorUInt16NonFaultingZeroExtendToInt32(ushort* address) => LoadVectorUInt16NonFaultingZeroExtendToInt32(address);
+
+
+ /// LoadVectorUInt16NonFaultingZeroExtendToInt64 : Load 16-bit data and zero-extend, non-faulting
+
+ /// <summary>
+ /// svint64_t svldnf1uh_s64(svbool_t pg, const uint16_t *base)
+ /// LDNF1H Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IL_3A_B LDNF1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorUInt16NonFaultingZeroExtendToInt64(ushort* address) => LoadVectorUInt16NonFaultingZeroExtendToInt64(address);
+
+
+ /// LoadVectorUInt16NonFaultingZeroExtendToUInt32 : Load 16-bit data and zero-extend, non-faulting
+
+ /// <summary>
+ /// svuint32_t svldnf1uh_u32(svbool_t pg, const uint16_t *base)
+ /// LDNF1H Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IL_3A_B LDNF1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<uint> LoadVectorUInt16NonFaultingZeroExtendToUInt32(ushort* address) => LoadVectorUInt16NonFaultingZeroExtendToUInt32(address);
+
+
+ /// LoadVectorUInt16NonFaultingZeroExtendToUInt64 : Load 16-bit data and zero-extend, non-faulting
+
+ /// <summary>
+ /// svuint64_t svldnf1uh_u64(svbool_t pg, const uint16_t *base)
+ /// LDNF1H Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IL_3A_B LDNF1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorUInt16NonFaultingZeroExtendToUInt64(ushort* address) => LoadVectorUInt16NonFaultingZeroExtendToUInt64(address);
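+
+ // --- Editor's illustrative sketch (not generator output) ---
+ // The non-faulting loads above take no mask parameter; per the notes in their summaries,
+ // the governing predicate is embedded as arg1 during import. Lanes that would have
+ // faulted are reported through the FFR instead of raising an exception, so a caller can
+ // pair the load with GetFfrInt64 (assumed from elsewhere in this API) to find usable lanes.
+ private static unsafe Vector<long> TryLoadUInt16AsInt64(ushort* address, out Vector<long> valid)
+ {
+     Vector<long> data = LoadVectorUInt16NonFaultingZeroExtendToInt64(address);
+     valid = GetFfrInt64();                         // assumed: lanes that loaded successfully
+     return data;
+ }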
+
+
+ /// LoadVectorUInt16ZeroExtendFirstFaulting : Load 16-bit data and zero-extend, first-faulting
+
+ /// <summary>
+ /// svint32_t svldff1uh_s32(svbool_t pg, const uint16_t *base)
+ /// LDFF1H Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]
+ /// LDFF1H Zresult.S, Pg/Z, [Xbase, XZR, LSL #1]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HW_4A LDFF1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LDFF1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V0, REG_P2, REG_R6, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V5, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IG_4A_G LDFF1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #1}]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LDFF1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> LoadVectorUInt16ZeroExtendFirstFaulting(Vector<int> mask, ushort* address) => LoadVectorUInt16ZeroExtendFirstFaulting(mask, address);
+
+ /// <summary>
+ /// svint64_t svldff1uh_s64(svbool_t pg, const uint16_t *base)
+ /// LDFF1H Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]
+ /// LDFF1H Zresult.D, Pg/Z, [Xbase, XZR, LSL #1]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HW_4A LDFF1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LDFF1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V0, REG_P2, REG_R6, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V5, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IG_4A_G LDFF1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #1}]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LDFF1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorUInt16ZeroExtendFirstFaulting(Vector<long> mask, ushort* address) => LoadVectorUInt16ZeroExtendFirstFaulting(mask, address);
+
+ /// <summary>
+ /// svuint32_t svldff1uh_u32(svbool_t pg, const uint16_t *base)
+ /// LDFF1H Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]
+ /// LDFF1H Zresult.S, Pg/Z, [Xbase, XZR, LSL #1]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HW_4A LDFF1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LDFF1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V0, REG_P2, REG_R6, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V5, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IG_4A_G LDFF1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #1}]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LDFF1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<uint> LoadVectorUInt16ZeroExtendFirstFaulting(Vector<uint> mask, ushort* address) => LoadVectorUInt16ZeroExtendFirstFaulting(mask, address);
+
+ /// <summary>
+ /// svuint64_t svldff1uh_u64(svbool_t pg, const uint16_t *base)
+ /// LDFF1H Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]
+ /// LDFF1H Zresult.D, Pg/Z, [Xbase, XZR, LSL #1]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HW_4A LDFF1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LDFF1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V0, REG_P2, REG_R6, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LDFF1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V5, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IG_4A_G LDFF1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #1}]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LDFF1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorUInt16ZeroExtendFirstFaulting(Vector<ulong> mask, ushort* address) => LoadVectorUInt16ZeroExtendFirstFaulting(mask, address);
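+
+ // --- Editor's illustrative sketch (not generator output) ---
+ // Zero- vs sign-extension is fixed by the method family, not by the mask: the LDFF1H
+ // forms above always zero-extend, and the mask's element type only selects the lane
+ // width of the result, e.g.:
+ private static unsafe Vector<ulong> LoadUInt16AsUInt64FirstFaulting(Vector<ulong> mask, ushort* address)
+     => LoadVectorUInt16ZeroExtendFirstFaulting(mask, address);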
+
+
+ /// LoadVectorUInt16ZeroExtendToInt32 : Load 16-bit data and zero-extend
+
+ /// <summary>
+ /// svint32_t svld1uh_s32(svbool_t pg, const uint16_t *base)
+ /// LD1H Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]
+ /// LD1H Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A_G LD1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HW_4A LD1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LD1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IK_4A_I LD1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LD1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> LoadVectorUInt16ZeroExtendToInt32(Vector<int> mask, ushort* address) => LoadVectorUInt16ZeroExtendToInt32(mask, address);
+
+
+ /// LoadVectorUInt16ZeroExtendToInt64 : Load 16-bit data and zero-extend
+
+ /// <summary>
+ /// svint64_t svld1uh_s64(svbool_t pg, const uint16_t *base)
+ /// LD1H Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]
+ /// LD1H Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A_G LD1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HW_4A LD1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LD1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IK_4A_I LD1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LD1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorUInt16ZeroExtendToInt64(Vector<long> mask, ushort* address) => LoadVectorUInt16ZeroExtendToInt64(mask, address);
+
+
+ /// LoadVectorUInt16ZeroExtendToUInt32 : Load 16-bit data and zero-extend
+
+ /// <summary>
+ /// svuint32_t svld1uh_u32(svbool_t pg, const uint16_t *base)
+ /// LD1H Zresult.S, Pg/Z, [Xarray, Xindex, LSL #1]
+ /// LD1H Zresult.S, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A_G LD1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HW_4A LD1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LD1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IK_4A_I LD1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LD1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<uint> LoadVectorUInt16ZeroExtendToUInt32(Vector<uint> mask, ushort* address) => LoadVectorUInt16ZeroExtendToUInt32(mask, address);
+
+
+ /// LoadVectorUInt16ZeroExtendToUInt64 : Load 16-bit data and zero-extend
+
+ /// <summary>
+ /// svuint64_t svld1uh_u64(svbool_t pg, const uint16_t *base)
+ /// LD1H Zresult.D, Pg/Z, [Xarray, Xindex, LSL #1]
+ /// LD1H Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IJ_3A_G LD1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HW_4A LD1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LD1H {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LD1H {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IK_4A_I LD1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LD1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorUInt16ZeroExtendToUInt64(Vector<ulong> mask, ushort* address) => LoadVectorUInt16ZeroExtendToUInt64(mask, address);
+
+
+ /// LoadVectorUInt32NonFaultingZeroExtendToInt64 : Load 32-bit data and zero-extend, non-faulting
+
+ /// <summary>
+ /// svint64_t svldnf1uw_s64(svbool_t pg, const uint32_t *base)
+ /// LDNF1W Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IL_3A_A LDNF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1w, EA_SCALABLE, REG_V0, REG_P2, REG_R4, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1w, EA_SCALABLE, REG_V0, REG_P2, REG_R4, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorUInt32NonFaultingZeroExtendToInt64(uint* address) => LoadVectorUInt32NonFaultingZeroExtendToInt64(address);
+
+
+ /// LoadVectorUInt32NonFaultingZeroExtendToUInt64 : Load 32-bit data and zero-extend, non-faulting
+
+ /// <summary>
+ /// svuint64_t svldnf1uw_u64(svbool_t pg, const uint32_t *base)
+ /// LDNF1W Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IL_3A_A LDNF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1w, EA_SCALABLE, REG_V0, REG_P2, REG_R4, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1w, EA_SCALABLE, REG_V0, REG_P2, REG_R4, 5, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorUInt32NonFaultingZeroExtendToUInt64(uint* address) => LoadVectorUInt32NonFaultingZeroExtendToUInt64(address);
+
+
+ /// LoadVectorUInt32ZeroExtendFirstFaulting : Load 32-bit data and zero-extend, first-faulting
+
+ /// <summary>
+ /// svint64_t svldff1uw_s64(svbool_t pg, const uint32_t *base)
+ /// LDFF1W Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]
+ /// LDFF1W Zresult.D, Pg/Z, [Xbase, XZR, LSL #2]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HW_4A LDFF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LDFF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IG_4A_F LDFF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #2}]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LDFF1W {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorUInt32ZeroExtendFirstFaulting(Vector<long> mask, uint* address) => LoadVectorUInt32ZeroExtendFirstFaulting(mask, address);
+
+ /// <summary>
+ /// svuint64_t svldff1uw_u64(svbool_t pg, const uint32_t *base)
+ /// LDFF1W Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]
+ /// LDFF1W Zresult.D, Pg/Z, [Xbase, XZR, LSL #2]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HW_4A LDFF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V5, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LDFF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P5, REG_R2, REG_V1, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V3, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LDFF1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IG_4A_F LDFF1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, <Xm>, LSL #2}]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1w, EA_SCALABLE, REG_V1, REG_P0, REG_R2, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LDFF1W {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1w, EA_SCALABLE, REG_V2, REG_P1, REG_V3, 124, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorUInt32ZeroExtendFirstFaulting(Vector<ulong> mask, uint* address) => LoadVectorUInt32ZeroExtendFirstFaulting(mask, address);
+
+
+ /// LoadVectorUInt32ZeroExtendToInt64 : Load 32-bit data and zero-extend
+
+ /// <summary>
+ /// svint64_t svld1uw_s64(svbool_t pg, const uint32_t *base)
+ /// LD1W Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]
+ /// LD1W Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IH_3A_F LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q);
+ /// IF_SVE_HW_4A LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_II_4A_H LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LD1W {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> LoadVectorUInt32ZeroExtendToInt64(Vector<long> mask, uint* address) => LoadVectorUInt32ZeroExtendToInt64(mask, address);
+
+
+ /// LoadVectorUInt32ZeroExtendToUInt64 : Load 32-bit data and zero-extend
+
+ /// <summary>
+ /// svuint64_t svld1uw_u64(svbool_t pg, const uint32_t *base)
+ /// LD1W Zresult.D, Pg/Z, [Xarray, Xindex, LSL #2]
+ /// LD1W Zresult.D, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IH_3A_F LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, 3, INS_OPTS_SCALABLE_Q);
+ /// IF_SVE_HW_4A LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_V1, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_A LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_HW_4A_B LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V4, REG_P5, REG_R3, REG_V1, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_HW_4A_C LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V5, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_HW_4B LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HW_4B_D LD1W {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_II_4A_H LD1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1w, EA_SCALABLE, REG_V5, REG_P3, REG_R4, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_HX_3A_E LD1W {<Zt>.S }, <Pg>/Z, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1w, EA_SCALABLE, REG_V1, REG_P2, REG_V9, 124, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> LoadVectorUInt32ZeroExtendToUInt64(Vector<ulong> mask, uint* address) => LoadVectorUInt32ZeroExtendToUInt64(mask, address);
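+
+ // --- Editor's illustrative sketch (not generator output) ---
+ // The ToInt64/ToUInt64 pair above lowers to the same zero-extending LD1W; only the static
+ // element type of the result differs, as the svld1uw_s64/svld1uw_u64 signatures show.
+ private static unsafe (Vector<long>, Vector<ulong>) ZeroExtendBothViews(Vector<long> maskS, Vector<ulong> maskU, uint* address)
+     => (LoadVectorUInt32ZeroExtendToInt64(maskS, address), LoadVectorUInt32ZeroExtendToUInt64(maskU, address));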
+
+
+ /// LoadVectorx2 : Load two-element tuples into two vectors
+
+ /// <summary>
+ /// svint8x2_t svld2[_s8](svbool_t pg, const int8_t *base)
+ /// LD2B {Zresult0.B, Zresult1.B}, Pg/Z, [Xarray, Xindex]
+ /// LD2B {Zresult0.B, Zresult1.B}, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IS_3A LD2B {<Zt1>.B, <Zt2>.B }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld2b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -16, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_IT_4A LD2B {<Zt1>.B, <Zt2>.B }, <Pg>/Z, [<Xn|SP>, <Xm>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld2b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe (Vector<sbyte>, Vector<sbyte>) LoadVectorx2(Vector<sbyte> mask, sbyte* address) => LoadVectorx2(mask, address);
+
+ /// <summary>
+ /// svint16x2_t svld2[_s16](svbool_t pg, const int16_t *base)
+ /// LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xarray, Xindex, LSL #1]
+ /// LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IS_3A LD2H {<Zt1>.H, <Zt2>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld2h, EA_SCALABLE, REG_V6, REG_P5, REG_R4, 8, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_IT_4A LD2H {<Zt1>.H, <Zt2>.H }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld2h, EA_SCALABLE, REG_V8, REG_P5, REG_R9, REG_R10, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe (Vector<short>, Vector<short>) LoadVectorx2(Vector<short> mask, short* address) => LoadVectorx2(mask, address);
+
+ /// <summary>
+ /// svint32x2_t svld2[_s32](svbool_t pg, const int32_t *base)
+ /// LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xarray, Xindex, LSL #2]
+ /// LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IS_3A LD2W {<Zt1>.S, <Zt2>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld2w, EA_SCALABLE, REG_V0, REG_P0, REG_R1, 2, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_IT_4A LD2W {<Zt1>.S, <Zt2>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld2w, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe (Vector<int>, Vector<int>) LoadVectorx2(Vector<int> mask, int* address) => LoadVectorx2(mask, address);
+
+ /// <summary>
+ /// svint64x2_t svld2[_s64](svbool_t pg, const int64_t *base)
+ /// LD2D {Zresult0.D, Zresult1.D}, Pg/Z, [Xarray, Xindex, LSL #3]
+ /// LD2D {Zresult0.D, Zresult1.D}, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IS_3A LD2D {<Zt1>.D, <Zt2>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld2d, EA_SCALABLE, REG_V4, REG_P5, REG_R7, 14, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IT_4A LD2D {<Zt1>.D, <Zt2>.D }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld2d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe (Vector<long>, Vector<long>) LoadVectorx2(Vector<long> mask, long* address) => LoadVectorx2(mask, address);
+
+ /// <summary>
+ /// svuint8x2_t svld2[_u8](svbool_t pg, const uint8_t *base)
+ /// LD2B {Zresult0.B, Zresult1.B}, Pg/Z, [Xarray, Xindex]
+ /// LD2B {Zresult0.B, Zresult1.B}, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IS_3A LD2B {<Zt1>.B, <Zt2>.B }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld2b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -16, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_IT_4A LD2B {<Zt1>.B, <Zt2>.B }, <Pg>/Z, [<Xn|SP>, <Xm>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld2b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe (Vector<byte>, Vector<byte>) LoadVectorx2(Vector<byte> mask, byte* address) => LoadVectorx2(mask, address);
+
+ /// <summary>
+ /// svuint16x2_t svld2[_u16](svbool_t pg, const uint16_t *base)
+ /// LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xarray, Xindex, LSL #1]
+ /// LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IS_3A LD2H {<Zt1>.H, <Zt2>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld2h, EA_SCALABLE, REG_V6, REG_P5, REG_R4, 8, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_IT_4A LD2H {<Zt1>.H, <Zt2>.H }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld2h, EA_SCALABLE, REG_V8, REG_P5, REG_R9, REG_R10, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe (Vector<ushort>, Vector<ushort>) LoadVectorx2(Vector<ushort> mask, ushort* address) => LoadVectorx2(mask, address);
+
+ /// <summary>
+ /// svuint32x2_t svld2[_u32](svbool_t pg, const uint32_t *base)
+ /// LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xarray, Xindex, LSL #2]
+ /// LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IS_3A LD2W {<Zt1>.S, <Zt2>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld2w, EA_SCALABLE, REG_V0, REG_P0, REG_R1, 2, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_IT_4A LD2W {<Zt1>.S, <Zt2>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld2w, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe (Vector<uint>, Vector<uint>) LoadVectorx2(Vector<uint> mask, uint* address) => LoadVectorx2(mask, address);
+
+ /// <summary>
+ /// svuint64x2_t svld2[_u64](svbool_t pg, const uint64_t *base)
+ /// LD2D {Zresult0.D, Zresult1.D}, Pg/Z, [Xarray, Xindex, LSL #3]
+ /// LD2D {Zresult0.D, Zresult1.D}, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IS_3A LD2D {<Zt1>.D, <Zt2>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld2d, EA_SCALABLE, REG_V4, REG_P5, REG_R7, 14, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IT_4A LD2D {<Zt1>.D, <Zt2>.D }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld2d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe (Vector<ulong>, Vector<ulong>) LoadVectorx2(Vector<ulong> mask, ulong* address) => LoadVectorx2(mask, address);
+
+ /// <summary>
+ /// svfloat32x2_t svld2[_f32](svbool_t pg, const float32_t *base)
+ /// LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xarray, Xindex, LSL #2]
+ /// LD2W {Zresult0.S, Zresult1.S}, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IS_3A LD2W {<Zt1>.S, <Zt2>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld2w, EA_SCALABLE, REG_V0, REG_P0, REG_R1, 2, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_IT_4A LD2W {<Zt1>.S, <Zt2>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld2w, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe (Vector<float>, Vector<float>) LoadVectorx2(Vector<float> mask, float* address) => LoadVectorx2(mask, address);
+
+ /// <summary>
+ /// svfloat64x2_t svld2[_f64](svbool_t pg, const float64_t *base)
+ /// LD2D {Zresult0.D, Zresult1.D}, Pg/Z, [Xarray, Xindex, LSL #3]
+ /// LD2D {Zresult0.D, Zresult1.D}, Pg/Z, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_IS_3A LD2D {<Zt1>.D, <Zt2>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_ld2d, EA_SCALABLE, REG_V4, REG_P5, REG_R7, 14, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_IT_4A LD2D {<Zt1>.D, <Zt2>.D }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_ld2d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe (Vector<double>, Vector<double>) LoadVectorx2(Vector<double> mask, double* address) => LoadVectorx2(mask, address);
Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD2D {.D, .D }, /Z, [{, #, MUL + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld2d, EA_SCALABLE, REG_V4, REG_P5, REG_R7, 14, INS_OPTS_SCALABLE_D); + /// IF_SVE_IT_4A LD2D {.D, .D }, /Z, [, , LSL + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld2d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, double* address) => LoadVectorx2(mask, address); + + + /// LoadVectorx3 : Load three-element tuples into three vectors + + /// + /// svint8x3_t svld3[_s8](svbool_t pg, const int8_t *base) + /// LD3B {Zresult0.B - Zresult2.B}, Pg/Z, [Xarray, Xindex] + /// LD3B {Zresult0.B - Zresult2.B}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD3B {.B, .B, .B }, /Z, [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld3b, EA_SCALABLE, REG_V0, REG_P0, REG_R0, 21, INS_OPTS_SCALABLE_B); + /// IF_SVE_IT_4A LD3B {.B, .B, .B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld3b, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R2, INS_OPTS_SCALABLE_B); + /// + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, sbyte* address) => LoadVectorx3(mask, address); + + /// + /// svint16x3_t svld3[_s16](svbool_t pg, const int16_t *base) + /// LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD3H {.H, .H, .H }, /Z, [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld3h, EA_SCALABLE, REG_V0, REG_P0, REG_R0, 21, INS_OPTS_SCALABLE_H); + /// IF_SVE_IT_4A LD3H {.H, .H, .H }, /Z, [, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld3h, EA_SCALABLE, REG_V30, REG_P2, REG_R9, REG_R4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, short* address) => LoadVectorx3(mask, address); + + /// + /// svint32x3_t svld3[_s32](svbool_t pg, const int32_t *base) + /// LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xarray, Xindex, LSL #2] + /// LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD3W {.S, .S, .S }, /Z, [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld3w, EA_SCALABLE, REG_V0, REG_P0, REG_R0, -24, INS_OPTS_SCALABLE_S); + /// IF_SVE_IT_4A LD3W {.S, .S, .S }, /Z, [, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld3w, EA_SCALABLE, REG_V1, REG_P3, REG_R2, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, int* address) => LoadVectorx3(mask, address); + + /// + /// svint64x3_t svld3[_s64](svbool_t pg, const int64_t *base) + /// LD3D {Zresult0.D - Zresult2.D}, Pg/Z, [Xarray, Xindex, LSL #3] + /// LD3D {Zresult0.D - Zresult2.D}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD3D {.D, .D, .D }, /Z, [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld3d, EA_SCALABLE, REG_V0, REG_P0, REG_R0, -24, INS_OPTS_SCALABLE_D); + /// IF_SVE_IT_4A LD3D {.D, .D, .D }, /Z, [, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld3d, EA_SCALABLE, REG_V4, REG_P3, REG_R8, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, long* address) => LoadVectorx3(mask, address); + + /// + /// svuint8x3_t svld3[_u8](svbool_t pg, const uint8_t *base) + /// LD3B {Zresult0.B - Zresult2.B}, Pg/Z, [Xarray, Xindex] + /// LD3B {Zresult0.B - 
Zresult2.B}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD3B {.B, .B, .B }, /Z, [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld3b, EA_SCALABLE, REG_V0, REG_P0, REG_R0, 21, INS_OPTS_SCALABLE_B); + /// IF_SVE_IT_4A LD3B {.B, .B, .B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld3b, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R2, INS_OPTS_SCALABLE_B); + /// + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, byte* address) => LoadVectorx3(mask, address); + + /// + /// svuint16x3_t svld3[_u16](svbool_t pg, const uint16_t *base) + /// LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD3H {.H, .H, .H }, /Z, [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld3h, EA_SCALABLE, REG_V0, REG_P0, REG_R0, 21, INS_OPTS_SCALABLE_H); + /// IF_SVE_IT_4A LD3H {.H, .H, .H }, /Z, [, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld3h, EA_SCALABLE, REG_V30, REG_P2, REG_R9, REG_R4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, ushort* address) => LoadVectorx3(mask, address); + + /// + /// svuint32x3_t svld3[_u32](svbool_t pg, const uint32_t *base) + /// LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xarray, Xindex, LSL #2] + /// LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD3W {.S, .S, .S }, /Z, [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld3w, EA_SCALABLE, REG_V0, REG_P0, REG_R0, -24, INS_OPTS_SCALABLE_S); + /// IF_SVE_IT_4A LD3W {.S, .S, .S }, /Z, [, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld3w, EA_SCALABLE, REG_V1, REG_P3, REG_R2, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, uint* address) => LoadVectorx3(mask, address); + + /// + /// svuint64x3_t svld3[_u64](svbool_t pg, const uint64_t *base) + /// LD3D {Zresult0.D - Zresult2.D}, Pg/Z, [Xarray, Xindex, LSL #3] + /// LD3D {Zresult0.D - Zresult2.D}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD3D {.D, .D, .D }, /Z, [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld3d, EA_SCALABLE, REG_V0, REG_P0, REG_R0, -24, INS_OPTS_SCALABLE_D); + /// IF_SVE_IT_4A LD3D {.D, .D, .D }, /Z, [, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld3d, EA_SCALABLE, REG_V4, REG_P3, REG_R8, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, ulong* address) => LoadVectorx3(mask, address); + + /// + /// svfloat32x3_t svld3[_f32](svbool_t pg, const float32_t *base) + /// LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xarray, Xindex, LSL #2] + /// LD3W {Zresult0.S - Zresult2.S}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD3W {.S, .S, .S }, /Z, [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld3w, EA_SCALABLE, REG_V0, REG_P0, REG_R0, -24, INS_OPTS_SCALABLE_S); + /// IF_SVE_IT_4A LD3W {.S, .S, .S }, /Z, [, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld3w, EA_SCALABLE, REG_V1, REG_P3, REG_R2, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, float* address) => LoadVectorx3(mask, address); + + /// + /// svfloat64x3_t svld3[_f64](svbool_t pg, const float64_t *base) + /// LD3D {Zresult0.D - Zresult2.D}, Pg/Z, [Xarray, Xindex, LSL #3] + /// LD3D {Zresult0.D - Zresult2.D}, Pg/Z, 
[Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD3D {.D, .D, .D }, /Z, [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld3d, EA_SCALABLE, REG_V0, REG_P0, REG_R0, -24, INS_OPTS_SCALABLE_D); + /// IF_SVE_IT_4A LD3D {.D, .D, .D }, /Z, [, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld3d, EA_SCALABLE, REG_V4, REG_P3, REG_R8, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, double* address) => LoadVectorx3(mask, address); + + + /// LoadVectorx4 : Load four-element tuples into four vectors + + /// + /// svint8x4_t svld4[_s8](svbool_t pg, const int8_t *base) + /// LD4B {Zresult0.B - Zresult3.B}, Pg/Z, [Xarray, Xindex] + /// LD4B {Zresult0.B - Zresult3.B}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD4B {.B, .B, .B, .B }, /Z, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld4b, EA_SCALABLE, REG_V31, REG_P2, REG_R1, -32, INS_OPTS_SCALABLE_B); + /// IF_SVE_IT_4A LD4B {.B, .B, .B, .B }, /Z, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld4b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B); + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, sbyte* address) => LoadVectorx4(mask, address); + + /// + /// svint16x4_t svld4[_s16](svbool_t pg, const int16_t *base) + /// LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD4H {.H, .H, .H, .H }, /Z, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld4h, EA_SCALABLE, REG_V5, REG_P4, REG_R3, -32, INS_OPTS_SCALABLE_H); + /// IF_SVE_IT_4A LD4H {.H, .H, .H, .H }, /Z, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld4h, EA_SCALABLE, REG_V13, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, short* address) => LoadVectorx4(mask, address); + + /// + /// svint32x4_t svld4[_s32](svbool_t pg, const int32_t *base) + /// LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xarray, Xindex, LSL #2] + /// LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD4W {.S, .S, .S, .S }, /Z, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld4w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, 28, INS_OPTS_SCALABLE_S); + /// IF_SVE_IT_4A LD4W {.S, .S, .S, .S }, /Z, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld4w, EA_SCALABLE, REG_V10, REG_P3, REG_R2, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, int* address) => LoadVectorx4(mask, address); + + /// + /// svint64x4_t svld4[_s64](svbool_t pg, const int64_t *base) + /// LD4D {Zresult0.D - Zresult3.D}, Pg/Z, [Xarray, Xindex, LSL #3] + /// LD4D {Zresult0.D - Zresult3.D}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD4D {.D, .D, .D, .D }, /Z, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld4d, EA_SCALABLE, REG_V8, REG_P0, REG_R0, 28, INS_OPTS_SCALABLE_D); + /// IF_SVE_IT_4A LD4D {.D, .D, .D, .D }, /Z, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld4d, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, long* address) => LoadVectorx4(mask, address); + + /// + /// svuint8x4_t svld4[_u8](svbool_t pg, const uint8_t *base) + /// LD4B {Zresult0.B - Zresult3.B}, Pg/Z, [Xarray, Xindex] 
+ /// LD4B {Zresult0.B - Zresult3.B}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD4B {.B, .B, .B, .B }, /Z, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld4b, EA_SCALABLE, REG_V31, REG_P2, REG_R1, -32, INS_OPTS_SCALABLE_B); + /// IF_SVE_IT_4A LD4B {.B, .B, .B, .B }, /Z, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld4b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B); + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, byte* address) => LoadVectorx4(mask, address); + + /// + /// svuint16x4_t svld4[_u16](svbool_t pg, const uint16_t *base) + /// LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD4H {.H, .H, .H, .H }, /Z, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld4h, EA_SCALABLE, REG_V5, REG_P4, REG_R3, -32, INS_OPTS_SCALABLE_H); + /// IF_SVE_IT_4A LD4H {.H, .H, .H, .H }, /Z, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld4h, EA_SCALABLE, REG_V13, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, ushort* address) => LoadVectorx4(mask, address); + + /// + /// svuint32x4_t svld4[_u32](svbool_t pg, const uint32_t *base) + /// LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xarray, Xindex, LSL #2] + /// LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD4W {.S, .S, .S, .S }, /Z, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld4w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, 28, INS_OPTS_SCALABLE_S); + /// IF_SVE_IT_4A LD4W {.S, .S, .S, .S }, /Z, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld4w, EA_SCALABLE, REG_V10, REG_P3, REG_R2, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, uint* address) => LoadVectorx4(mask, address); + + /// + /// svuint64x4_t svld4[_u64](svbool_t pg, const uint64_t *base) + /// LD4D {Zresult0.D - Zresult3.D}, Pg/Z, [Xarray, Xindex, LSL #3] + /// LD4D {Zresult0.D - Zresult3.D}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD4D {.D, .D, .D, .D }, /Z, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld4d, EA_SCALABLE, REG_V8, REG_P0, REG_R0, 28, INS_OPTS_SCALABLE_D); + /// IF_SVE_IT_4A LD4D {.D, .D, .D, .D }, /Z, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld4d, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, ulong* address) => LoadVectorx4(mask, address); + + /// + /// svfloat32x4_t svld4[_f32](svbool_t pg, const float32_t *base) + /// LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xarray, Xindex, LSL #2] + /// LD4W {Zresult0.S - Zresult3.S}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD4W {.S, .S, .S, .S }, /Z, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld4w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, 28, INS_OPTS_SCALABLE_S); + /// IF_SVE_IT_4A LD4W {.S, .S, .S, .S }, /Z, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld4w, EA_SCALABLE, REG_V10, REG_P3, REG_R2, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, float* address) => LoadVectorx4(mask, address); + + /// + /// svfloat64x4_t svld4[_f64](svbool_t pg, const float64_t *base) + /// LD4D {Zresult0.D - Zresult3.D}, Pg/Z, 
[Xarray, Xindex, LSL #3] + /// LD4D {Zresult0.D - Zresult3.D}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD4D {.D, .D, .D, .D }, /Z, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld4d, EA_SCALABLE, REG_V8, REG_P0, REG_R0, 28, INS_OPTS_SCALABLE_D); + /// IF_SVE_IT_4A LD4D {.D, .D, .D, .D }, /Z, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld4d, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, double* address) => LoadVectorx4(mask, address); + + + /// Max : Maximum + + /// + /// svint8_t svmax[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// SMAX Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; SMAX Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svmax[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// SMAX Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// SMAX Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; SMAX Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svmax[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; SMAX Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; SMAX Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AD_3A SMAX ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smax, EA_SCALABLE, REG_V24, REG_P0, REG_V2, INS_OPTS_SCALABLE_B); + /// IF_SVE_ED_1A SMAX ., ., # + /// theEmitter->emitIns_R_I(INS_sve_smax, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_smax, EA_SCALABLE, REG_V1, 127, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Max(Vector left, Vector right) => Max(left, right); + + /// + /// svint16_t svmax[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// SMAX Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SMAX Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svmax[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// SMAX Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// SMAX Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; SMAX Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svmax[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; SMAX Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; SMAX Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AD_3A SMAX ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_smax, EA_SCALABLE, REG_V24, REG_P0, REG_V2, INS_OPTS_SCALABLE_B); + /// IF_SVE_ED_1A SMAX ., ., # + /// theEmitter->emitIns_R_I(INS_sve_smax, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_smax, EA_SCALABLE, REG_V1, 127, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Max(Vector left, Vector right) => Max(left, right); + + /// + /// svint32_t svmax[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// SMAX Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SMAX Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svmax[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// SMAX Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// SMAX Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; SMAX Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svmax[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; SMAX Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; SMAX Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AD_3A SMAX ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smax, EA_SCALABLE, REG_V24, REG_P0, REG_V2, INS_OPTS_SCALABLE_B); + /// IF_SVE_ED_1A SMAX ., ., # + /// theEmitter->emitIns_R_I(INS_sve_smax, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_smax, EA_SCALABLE, REG_V1, 127, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Max(Vector left, Vector right) => Max(left, right); + + /// + /// svint64_t svmax[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// SMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SMAX Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svmax[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// SMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// SMAX Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; SMAX Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svmax[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SMAX Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; SMAX Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AD_3A SMAX ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smax, EA_SCALABLE, REG_V24, REG_P0, REG_V2, INS_OPTS_SCALABLE_B); + /// IF_SVE_ED_1A SMAX ., ., # + /// theEmitter->emitIns_R_I(INS_sve_smax, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_smax, EA_SCALABLE, REG_V1, 127, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Max(Vector left, Vector right) => Max(left, right); + + /// + /// svuint8_t svmax[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UMAX Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; UMAX Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svmax[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UMAX Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// UMAX Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; UMAX Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svmax[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; UMAX Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; UMAX Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AD_3A UMAX ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_umax, EA_SCALABLE, REG_V15, REG_P4, REG_V2, INS_OPTS_SCALABLE_S); + /// IF_SVE_ED_1A UMAX ., ., # + /// theEmitter->emitIns_R_I(INS_sve_umax, EA_SCALABLE, REG_V4, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_umax, EA_SCALABLE, REG_V5, 255, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Max(Vector left, Vector right) => Max(left, right); + + /// + /// svuint16_t svmax[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UMAX Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; UMAX Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svmax[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UMAX Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// UMAX Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; UMAX Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svmax[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; UMAX Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; UMAX Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AD_3A UMAX ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umax, EA_SCALABLE, REG_V15, REG_P4, REG_V2, INS_OPTS_SCALABLE_S); + /// IF_SVE_ED_1A UMAX ., ., # + /// theEmitter->emitIns_R_I(INS_sve_umax, EA_SCALABLE, REG_V4, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_umax, EA_SCALABLE, REG_V5, 255, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Max(Vector left, Vector right) => Max(left, right); + + /// + /// svuint32_t svmax[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UMAX Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UMAX Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svmax[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UMAX Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// UMAX Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; UMAX Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svmax[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; UMAX Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; UMAX Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AD_3A UMAX ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umax, EA_SCALABLE, REG_V15, REG_P4, REG_V2, INS_OPTS_SCALABLE_S); + /// IF_SVE_ED_1A UMAX ., ., # + /// theEmitter->emitIns_R_I(INS_sve_umax, EA_SCALABLE, REG_V4, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_umax, EA_SCALABLE, REG_V5, 255, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Max(Vector left, Vector right) => Max(left, right); + + /// + /// svuint64_t svmax[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UMAX Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svmax[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// UMAX Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; UMAX Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svmax[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; UMAX Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; UMAX Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AD_3A UMAX ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_umax, EA_SCALABLE, REG_V15, REG_P4, REG_V2, INS_OPTS_SCALABLE_S); + /// IF_SVE_ED_1A UMAX ., ., # + /// theEmitter->emitIns_R_I(INS_sve_umax, EA_SCALABLE, REG_V4, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_umax, EA_SCALABLE, REG_V5, 255, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Max(Vector left, Vector right) => Max(left, right); + + /// + /// svfloat32_t svmax[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMAX Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FMAX Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svmax[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMAX Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// FMAX Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; FMAX Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svmax[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMAX Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMAX Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMAX ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fmax, EA_SCALABLE, REG_V30, REG_P2, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_HM_2A FMAX ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fmax, EA_SCALABLE, REG_V1, REG_P0, 0.0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_F(INS_sve_fmax, EA_SCALABLE, REG_V1, REG_P0, 1.0, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Max(Vector left, Vector right) => Max(left, right); + + /// + /// svfloat64_t svmax[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FMAX Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svmax[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FMAX Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// FMAX Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; FMAX Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svmax[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMAX Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMAX Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMAX ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fmax, EA_SCALABLE, REG_V30, REG_P2, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_HM_2A FMAX ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fmax, EA_SCALABLE, REG_V1, REG_P0, 0.0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_F(INS_sve_fmax, EA_SCALABLE, REG_V1, REG_P0, 1.0, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Max(Vector left, Vector right) => Max(left, right); + + + /// MaxAcross : Maximum reduction to scalar + + /// + /// int8_t svmaxv[_s8](svbool_t pg, svint8_t op) + /// SMAXV Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AK_3A SMAXV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_smaxv, EA_8BYTE, REG_V15, REG_P7, REG_V4, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxAcross(Vector value) => MaxAcross(value); + + /// + /// int16_t svmaxv[_s16](svbool_t pg, svint16_t op) + /// SMAXV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AK_3A SMAXV , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_smaxv, EA_8BYTE, REG_V15, REG_P7, REG_V4, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxAcross(Vector value) => MaxAcross(value); + + /// + /// int32_t svmaxv[_s32](svbool_t pg, svint32_t op) + /// SMAXV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AK_3A SMAXV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_smaxv, EA_8BYTE, REG_V15, REG_P7, REG_V4, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxAcross(Vector value) => MaxAcross(value); + + /// + /// int64_t svmaxv[_s64](svbool_t pg, svint64_t op) + /// SMAXV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AK_3A SMAXV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_smaxv, EA_8BYTE, REG_V15, REG_P7, REG_V4, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxAcross(Vector value) => MaxAcross(value); + + /// + /// uint8_t svmaxv[_u8](svbool_t pg, svuint8_t op) + /// UMAXV Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AK_3A UMAXV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_umaxv, EA_2BYTE, REG_V17, REG_P5, REG_V24, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxAcross(Vector value) => MaxAcross(value); + + /// + /// uint16_t svmaxv[_u16](svbool_t pg, svuint16_t op) + /// UMAXV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AK_3A UMAXV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_umaxv, EA_2BYTE, REG_V17, REG_P5, REG_V24, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxAcross(Vector value) => MaxAcross(value); + + /// + /// uint32_t svmaxv[_u32](svbool_t pg, svuint32_t op) + /// UMAXV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AK_3A UMAXV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_umaxv, EA_2BYTE, REG_V17, REG_P5, REG_V24, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxAcross(Vector value) => MaxAcross(value); + + /// + /// uint64_t svmaxv[_u64](svbool_t pg, svuint64_t op) + /// UMAXV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AK_3A UMAXV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_umaxv, EA_2BYTE, REG_V17, REG_P5, REG_V24, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxAcross(Vector value) => MaxAcross(value); + + /// + /// float32_t svmaxv[_f32](svbool_t pg, svfloat32_t op) + /// FMAXV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FMAXV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_fmaxv, EA_4BYTE, REG_V23, REG_P5, REG_V5, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxAcross(Vector value) => MaxAcross(value); + + /// + /// float64_t svmaxv[_f64](svbool_t pg, svfloat64_t op) + /// FMAXV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FMAXV , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmaxv, EA_4BYTE, REG_V23, REG_P5, REG_V5, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxAcross(Vector value) => MaxAcross(value); + + + /// MaxNumber : Maximum number + + /// + /// svfloat32_t svmaxnm[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMAXNM Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FMAXNM Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svmaxnm[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMAXNM Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// FMAXNM Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; FMAXNM Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svmaxnm[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMAXNM Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMAXNM Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMAXNM ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fmaxnm, EA_SCALABLE, REG_V31, REG_P3, REG_V4, INS_OPTS_SCALABLE_S); + /// IF_SVE_HM_2A FMAXNM ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fmaxnm, EA_SCALABLE, REG_V3, REG_P4, 0.0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_F(INS_sve_fmaxnm, EA_SCALABLE, REG_V3, REG_P4, 1.0, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxNumber(Vector left, Vector right) => MaxNumber(left, right); + + /// + /// svfloat64_t svmaxnm[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FMAXNM Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FMAXNM Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svmaxnm[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FMAXNM Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// FMAXNM Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; FMAXNM Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svmaxnm[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMAXNM Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMAXNM Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMAXNM ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fmaxnm, EA_SCALABLE, REG_V31, REG_P3, REG_V4, INS_OPTS_SCALABLE_S); + /// IF_SVE_HM_2A FMAXNM ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fmaxnm, EA_SCALABLE, REG_V3, REG_P4, 0.0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_F(INS_sve_fmaxnm, EA_SCALABLE, REG_V3, REG_P4, 1.0, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxNumber(Vector left, Vector right) => MaxNumber(left, right); + + + /// MaxNumberAcross : Maximum number reduction to scalar + + /// + /// float32_t svmaxnmv[_f32](svbool_t pg, svfloat32_t op) + /// FMAXNMV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FMAXNMV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_fmaxnmv, EA_2BYTE, REG_V22, REG_P6, REG_V6, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxNumberAcross(Vector value) => MaxNumberAcross(value); + + /// + /// float64_t svmaxnmv[_f64](svbool_t pg, svfloat64_t op) + /// FMAXNMV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FMAXNMV , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmaxnmv, EA_2BYTE, REG_V22, REG_P6, REG_V6, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxNumberAcross(Vector value) => MaxNumberAcross(value); + + + /// Min : Minimum + + /// + /// svint8_t svmin[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// SMIN Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; SMIN Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svmin[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// SMIN Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// SMIN Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; SMIN Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svmin[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; SMIN Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; SMIN Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AD_3A SMIN ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smin, EA_SCALABLE, REG_V9, REG_P1, REG_V27, INS_OPTS_SCALABLE_H); + /// IF_SVE_ED_1A SMIN ., ., # + /// theEmitter->emitIns_R_I(INS_sve_smin, EA_SCALABLE, REG_V2, -128, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_smin, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Min(Vector left, Vector right) => Min(left, right); + + /// + /// svint16_t svmin[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// SMIN Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SMIN Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svmin[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// SMIN Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// SMIN Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; SMIN Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svmin[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; SMIN Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; SMIN Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AD_3A SMIN ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smin, EA_SCALABLE, REG_V9, REG_P1, REG_V27, INS_OPTS_SCALABLE_H); + /// IF_SVE_ED_1A SMIN ., ., # + /// theEmitter->emitIns_R_I(INS_sve_smin, EA_SCALABLE, REG_V2, -128, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_smin, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Min(Vector left, Vector right) => Min(left, right); + + /// + /// svint32_t svmin[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// SMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SMIN Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svmin[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// SMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// SMIN Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; SMIN Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svmin[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; SMIN Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; SMIN Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AD_3A SMIN ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_smin, EA_SCALABLE, REG_V9, REG_P1, REG_V27, INS_OPTS_SCALABLE_H); + /// IF_SVE_ED_1A SMIN ., ., # + /// theEmitter->emitIns_R_I(INS_sve_smin, EA_SCALABLE, REG_V2, -128, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_smin, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Min(Vector left, Vector right) => Min(left, right); + + /// + /// svint64_t svmin[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// SMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SMIN Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svmin[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// SMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// SMIN Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; SMIN Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svmin[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SMIN Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; SMIN Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AD_3A SMIN ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smin, EA_SCALABLE, REG_V9, REG_P1, REG_V27, INS_OPTS_SCALABLE_H); + /// IF_SVE_ED_1A SMIN ., ., # + /// theEmitter->emitIns_R_I(INS_sve_smin, EA_SCALABLE, REG_V2, -128, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_smin, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Min(Vector left, Vector right) => Min(left, right); + + /// + /// svuint8_t svmin[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UMIN Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; UMIN Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svmin[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UMIN Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// UMIN Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; UMIN Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svmin[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; UMIN Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; UMIN Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AD_3A UMIN ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umin, EA_SCALABLE, REG_V12, REG_P7, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_ED_1A UMIN ., ., # + /// theEmitter->emitIns_R_I(INS_sve_umin, EA_SCALABLE, REG_V6, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_umin, EA_SCALABLE, REG_V7, 255, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Min(Vector left, Vector right) => Min(left, right); + + /// + /// svuint16_t svmin[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UMIN Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; UMIN Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svmin[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UMIN Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// UMIN Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; UMIN Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svmin[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; UMIN Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; UMIN Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AD_3A UMIN ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_umin, EA_SCALABLE, REG_V12, REG_P7, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_ED_1A UMIN ., ., # + /// theEmitter->emitIns_R_I(INS_sve_umin, EA_SCALABLE, REG_V6, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_umin, EA_SCALABLE, REG_V7, 255, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Min(Vector left, Vector right) => Min(left, right); + + /// + /// svuint32_t svmin[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UMIN Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svmin[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// UMIN Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; UMIN Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svmin[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; UMIN Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; UMIN Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AD_3A UMIN ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umin, EA_SCALABLE, REG_V12, REG_P7, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_ED_1A UMIN ., ., # + /// theEmitter->emitIns_R_I(INS_sve_umin, EA_SCALABLE, REG_V6, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_umin, EA_SCALABLE, REG_V7, 255, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Min(Vector left, Vector right) => Min(left, right); + + /// + /// svuint64_t svmin[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UMIN Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svmin[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// UMIN Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; UMIN Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svmin[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; UMIN Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; UMIN Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AD_3A UMIN ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umin, EA_SCALABLE, REG_V12, REG_P7, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_ED_1A UMIN ., ., # + /// theEmitter->emitIns_R_I(INS_sve_umin, EA_SCALABLE, REG_V6, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_umin, EA_SCALABLE, REG_V7, 255, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Min(Vector left, Vector right) => Min(left, right); + + /// + /// svfloat32_t svmin[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FMIN Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svmin[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMIN Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// FMIN Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; FMIN Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svmin[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMIN Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMIN Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMIN ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmin, EA_SCALABLE, REG_V0, REG_P4, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_HM_2A FMIN ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fmin, EA_SCALABLE, REG_V6, REG_P5, 0.0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_F(INS_sve_fmin, EA_SCALABLE, REG_V6, REG_P5, 1.0, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Min(Vector left, Vector right) => Min(left, right); + + /// + /// svfloat64_t svmin[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FMIN Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svmin[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FMIN Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// FMIN Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; FMIN Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svmin[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMIN Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMIN Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMIN ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fmin, EA_SCALABLE, REG_V0, REG_P4, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_HM_2A FMIN ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fmin, EA_SCALABLE, REG_V6, REG_P5, 0.0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_F(INS_sve_fmin, EA_SCALABLE, REG_V6, REG_P5, 1.0, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Min(Vector left, Vector right) => Min(left, right); + + + /// MinAcross : Minimum reduction to scalar + + /// + /// int8_t svminv[_s8](svbool_t pg, svint8_t op) + /// SMINV Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AK_3A SMINV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_sminv, EA_4BYTE, REG_V16, REG_P6, REG_V14, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinAcross(Vector value) => MinAcross(value); + + /// + /// int16_t svminv[_s16](svbool_t pg, svint16_t op) + /// SMINV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AK_3A SMINV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_sminv, EA_4BYTE, REG_V16, REG_P6, REG_V14, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinAcross(Vector value) => MinAcross(value); + + /// + /// int32_t svminv[_s32](svbool_t pg, svint32_t op) + /// SMINV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AK_3A SMINV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_sminv, EA_4BYTE, REG_V16, REG_P6, REG_V14, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinAcross(Vector value) => MinAcross(value); + + /// + /// int64_t svminv[_s64](svbool_t pg, svint64_t op) + /// SMINV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AK_3A SMINV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_sminv, EA_4BYTE, REG_V16, REG_P6, REG_V14, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinAcross(Vector value) => MinAcross(value); + + /// + /// uint8_t svminv[_u8](svbool_t pg, svuint8_t op) + /// UMINV Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AK_3A UMINV , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uminv, EA_1BYTE, REG_V18, REG_P4, REG_V31, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinAcross(Vector value) => MinAcross(value); + + /// + /// uint16_t svminv[_u16](svbool_t pg, svuint16_t op) + /// UMINV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AK_3A UMINV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_uminv, EA_1BYTE, REG_V18, REG_P4, REG_V31, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinAcross(Vector value) => MinAcross(value); + + /// + /// uint32_t svminv[_u32](svbool_t pg, svuint32_t op) + /// UMINV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AK_3A UMINV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_uminv, EA_1BYTE, REG_V18, REG_P4, REG_V31, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinAcross(Vector value) => MinAcross(value); + + /// + /// uint64_t svminv[_u64](svbool_t pg, svuint64_t op) + /// UMINV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AK_3A UMINV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_uminv, EA_1BYTE, REG_V18, REG_P4, REG_V31, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinAcross(Vector value) => MinAcross(value); + + /// + /// float32_t svminv[_f32](svbool_t pg, svfloat32_t op) + /// FMINV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FMINV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_fminv, EA_4BYTE, REG_V25, REG_P3, REG_V3, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinAcross(Vector value) => MinAcross(value); + + /// + /// float64_t svminv[_f64](svbool_t pg, svfloat64_t op) + /// FMINV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FMINV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_fminv, EA_4BYTE, REG_V25, REG_P3, REG_V3, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinAcross(Vector value) => MinAcross(value); + + + /// MinNumber : Minimum number + + /// + /// svfloat32_t svminnm[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMINNM Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FMINNM Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svminnm[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMINNM Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// FMINNM Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; FMINNM Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svminnm[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMINNM Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMINNM Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMINNM ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_fminnm, EA_SCALABLE, REG_V1, REG_P5, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_HM_2A FMINNM ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fminnm, EA_SCALABLE, REG_V2, REG_P4, 0.0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_F(INS_sve_fminnm, EA_SCALABLE, REG_V2, REG_P4, 1.0, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinNumber(Vector left, Vector right) => MinNumber(left, right); + + /// + /// svfloat64_t svminnm[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FMINNM Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FMINNM Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svminnm[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FMINNM Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// FMINNM Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; FMINNM Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svminnm[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMINNM Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMINNM Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMINNM ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fminnm, EA_SCALABLE, REG_V1, REG_P5, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_HM_2A FMINNM ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fminnm, EA_SCALABLE, REG_V2, REG_P4, 0.0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_F(INS_sve_fminnm, EA_SCALABLE, REG_V2, REG_P4, 1.0, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinNumber(Vector left, Vector right) => MinNumber(left, right); + + + /// MinNumberAcross : Minimum number reduction to scalar + + /// + /// float32_t svminnmv[_f32](svbool_t pg, svfloat32_t op) + /// FMINNMV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FMINNMV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_fminnmv, EA_8BYTE, REG_V24, REG_P4, REG_V4, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinNumberAcross(Vector value) => MinNumberAcross(value); + + /// + /// float64_t svminnmv[_f64](svbool_t pg, svfloat64_t op) + /// FMINNMV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FMINNMV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_fminnmv, EA_8BYTE, REG_V24, REG_P4, REG_V4, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinNumberAcross(Vector value) => MinNumberAcross(value); + + + + /// Multiply : Multiply + + /// + /// svint8_t svmul[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// MUL Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; MUL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svmul[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// MUL Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MUL Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; MUL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svmul[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; MUL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; MUL Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AE_3A MUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_P1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_BD_3A MUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_V0, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_FD_3A MUL .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FD_3B MUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FD_3C MUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_EE_1A MUL ., ., # + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Multiply(Vector left, Vector right) => Multiply(left, right); + + /// + /// svint16_t svmul[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// MUL Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; MUL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svmul[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// MUL Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MUL Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; MUL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svmul[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; MUL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; MUL Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AE_3A MUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_P1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_BD_3A MUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_V0, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_FD_3A MUL .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FD_3B MUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FD_3C MUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_EE_1A MUL ., ., # + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Multiply(Vector left, Vector right) => Multiply(left, right); + + /// + /// svint32_t svmul[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// MUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; MUL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svmul[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// MUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MUL Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; MUL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svmul[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; MUL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; MUL Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AE_3A MUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_P1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_BD_3A MUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_V0, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_FD_3A MUL .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FD_3B MUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FD_3C MUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_EE_1A MUL ., ., # + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Multiply(Vector left, Vector right) => Multiply(left, right); + + /// + /// svint64_t svmul[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// MUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; MUL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svmul[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// MUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MUL Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; MUL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svmul[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; MUL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; MUL Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AE_3A MUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_P1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_BD_3A MUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_V0, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_FD_3A MUL .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FD_3B MUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FD_3C MUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_EE_1A MUL ., ., # + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Multiply(Vector left, Vector right) => Multiply(left, right); + + /// + /// svuint8_t svmul[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MUL Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; MUL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svmul[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MUL Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MUL Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; MUL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svmul[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; MUL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; MUL Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AE_3A MUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_P1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_BD_3A MUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_V0, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_FD_3A MUL .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FD_3B MUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FD_3C MUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_EE_1A MUL ., ., # + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Multiply(Vector left, Vector right) => Multiply(left, right); + + /// + /// svuint16_t svmul[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MUL Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; MUL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svmul[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MUL Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MUL Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; MUL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svmul[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; MUL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; MUL Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AE_3A MUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_P1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_BD_3A MUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_V0, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_FD_3A MUL .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FD_3B MUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FD_3C MUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_EE_1A MUL ., ., # + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Multiply(Vector left, Vector right) => Multiply(left, right); + + /// + /// svuint32_t svmul[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; MUL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svmul[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MUL Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; MUL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svmul[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; MUL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; MUL Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AE_3A MUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_P1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_BD_3A MUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_V0, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_FD_3A MUL .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FD_3B MUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FD_3C MUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_EE_1A MUL ., ., # + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Multiply(Vector left, Vector right) => Multiply(left, right); + + /// + /// svuint64_t svmul[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; MUL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svmul[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MUL Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; MUL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svmul[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; MUL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; MUL Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AE_3A MUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_P1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_BD_3A MUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_V0, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_FD_3A MUL .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FD_3B MUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FD_3C MUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_EE_1A MUL ., ., # + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Multiply(Vector left, Vector right) => Multiply(left, right); + + /// + /// svfloat32_t svmul[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FMUL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svmul[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMUL Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// FMUL Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// FMUL Zresult.S, Zop1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FMUL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svmul[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMUL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMUL Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fmul, EA_SCALABLE, REG_V2, REG_P6, REG_V1, INS_OPTS_SCALABLE_S); + /// IF_SVE_HK_3A FMUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmul, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_GX_3A FMUL <Zd>.S, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 2, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 3, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_GX_3B FMUL <Zd>.D, <Zn>.D, <Zm>.D[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V1, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V3, REG_V2, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_V4, REG_V10, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V7, REG_V6, REG_V15, 1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HM_2A FMUL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <const>
+ /// theEmitter->emitIns_R_R_F(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_P1, 0.5, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_F(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_P1, 2.0, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<float> Multiply(Vector<float> left, Vector<float> right) => Multiply(left, right);
+
+ /// <summary>
+ /// svfloat64_t svmul[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// FMUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ /// MOVPRFX Zresult, Zop1; FMUL Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// svfloat64_t svmul[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// FMUL Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ /// FMUL Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+ /// FMUL Zresult.D, Zop1.D, Zop2.D
+ /// MOVPRFX Zresult, Zop1; FMUL Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// svfloat64_t svmul[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMUL Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMUL Zresult.D, Pg/M, Zresult.D, Zop1.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HL_3A FMUL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmul, EA_SCALABLE, REG_V2, REG_P6, REG_V1, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_HK_3A FMUL <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmul, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_GX_3A FMUL <Zd>.S, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 2, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 3, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_GX_3B FMUL <Zd>.D, <Zn>.D, <Zm>.D[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V1, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V3, REG_V2, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_V4, REG_V10, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V7, REG_V6, REG_V15, 1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HM_2A FMUL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <const>
+ /// theEmitter->emitIns_R_R_F(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_P1, 0.5, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_F(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_P1, 2.0, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<double> Multiply(Vector<double> left, Vector<double> right) => Multiply(left, right);
+
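Multiply is a plain element-wise MUL/FMUL with the governing predicate embedded in the first argument, so a C# caller can use it like any other vector op. A vector-length-agnostic sketch (the helper name and the simple scalar tail are illustrative assumptions, not part of the patch):

    using System;
    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    internal static class SveScaleExample
    {
        static void ScaleInPlace(Span<int> data, int factor)
        {
            if (!Sve.IsSupported)
                throw new PlatformNotSupportedException();
            var f = new Vector<int>(factor);
            int i = 0;
            // Vector<int>.Count follows the hardware vector length, so the
            // same binary works for any SVE implementation.
            for (; i + Vector<int>.Count <= data.Length; i += Vector<int>.Count)
            {
                var v = new Vector<int>(data.Slice(i, Vector<int>.Count));
                Sve.Multiply(v, f).CopyTo(data.Slice(i, Vector<int>.Count));
            }
            for (; i < data.Length; i++)
            {
                data[i] *= factor; // scalar tail
            }
        }
    }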
+
+ /// MultiplyAdd : Multiply-add, addend first
+
+ /// <summary>
+ /// svint8_t svmla[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ /// MLA Ztied1.B, Pg/M, Zop2.B, Zop3.B
+ /// MOVPRFX Zresult, Zop1; MLA Zresult.B, Pg/M, Zop2.B, Zop3.B
+ /// svint8_t svmla[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ /// MLA Ztied1.B, Pg/M, Zop2.B, Zop3.B
+ /// MAD Ztied2.B, Pg/M, Zop3.B, Zop1.B
+ /// MAD Ztied3.B, Pg/M, Zop2.B, Zop1.B
+ /// MOVPRFX Zresult, Zop1; MLA Zresult.B, Pg/M, Zop2.B, Zop3.B
+ /// svint8_t svmla[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; MLA Zresult.B, Pg/M, Zop2.B, Zop3.B
+ /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; MAD Zresult.B, Pg/M, Zop3.B, Zop1.B
+ /// MOVPRFX Zresult.B, Pg/Z, Zop3.B; MAD Zresult.B, Pg/M, Zop2.B, Zop1.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AR_4A MLA <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_FF_3A MLA <Zda>.H, <Zn>.H, <Zm>.H[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FF_3B MLA <Zda>.S, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_FF_3C MLA <Zda>.D, <Zn>.D, <Zm>.D[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<sbyte> MultiplyAdd(Vector<sbyte> addend, Vector<sbyte> left, Vector<sbyte> right) => MultiplyAdd(addend, left, right);
+
+ /// <summary>
+ /// svint16_t svmla[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ /// MLA Ztied1.H, Pg/M, Zop2.H, Zop3.H
+ /// MOVPRFX Zresult, Zop1; MLA Zresult.H, Pg/M, Zop2.H, Zop3.H
+ /// svint16_t svmla[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ /// MLA Ztied1.H, Pg/M, Zop2.H, Zop3.H
+ /// MAD Ztied2.H, Pg/M, Zop3.H, Zop1.H
+ /// MAD Ztied3.H, Pg/M, Zop2.H, Zop1.H
+ /// MOVPRFX Zresult, Zop1; MLA Zresult.H, Pg/M, Zop2.H, Zop3.H
+ /// svint16_t svmla[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; MLA Zresult.H, Pg/M, Zop2.H, Zop3.H
+ /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; MAD Zresult.H, Pg/M, Zop3.H, Zop1.H
+ /// MOVPRFX Zresult.H, Pg/Z, Zop3.H; MAD Zresult.H, Pg/M, Zop2.H, Zop1.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AR_4A MLA <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, INS_OPTS_SCALABLE_B); + /// IF_SVE_FF_3A MLA .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right) => MultiplyAdd(addend, left, right); + + /// + /// svint32_t svmla[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) + /// MLA Ztied1.S, Pg/M, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; MLA Zresult.S, Pg/M, Zop2.S, Zop3.S + /// svint32_t svmla[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) + /// MLA Ztied1.S, Pg/M, Zop2.S, Zop3.S + /// MAD Ztied2.S, Pg/M, Zop3.S, Zop1.S + /// MAD Ztied3.S, Pg/M, Zop2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; MLA Zresult.S, Pg/M, Zop2.S, Zop3.S + /// svint32_t svmla[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; MLA Zresult.S, Pg/M, Zop2.S, Zop3.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; MAD Zresult.S, Pg/M, Zop3.S, Zop1.S + /// MOVPRFX Zresult.S, Pg/Z, Zop3.S; MAD Zresult.S, Pg/M, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLA ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, INS_OPTS_SCALABLE_B); + /// IF_SVE_FF_3A MLA .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right) => MultiplyAdd(addend, left, right); + + /// + /// svint64_t svmla[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) + /// MLA Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; MLA Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svint64_t svmla[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) + /// MLA Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// MAD Ztied2.D, Pg/M, Zop3.D, Zop1.D + /// MAD Ztied3.D, Pg/M, Zop2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; MLA Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svint64_t svmla[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; MLA Zresult.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; MAD Zresult.D, Pg/M, Zop3.D, Zop1.D + /// MOVPRFX Zresult.D, Pg/Z, Zop3.D; MAD Zresult.D, Pg/M, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLA ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, INS_OPTS_SCALABLE_B); + /// IF_SVE_FF_3A MLA .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right) => MultiplyAdd(addend, left, right); + + /// + /// svuint8_t svmla[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// MLA Ztied1.B, Pg/M, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; MLA Zresult.B, Pg/M, Zop2.B, Zop3.B + /// svuint8_t svmla[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// MLA Ztied1.B, Pg/M, Zop2.B, Zop3.B + /// MAD Ztied2.B, Pg/M, Zop3.B, Zop1.B + /// MAD Ztied3.B, Pg/M, Zop2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; MLA Zresult.B, Pg/M, Zop2.B, Zop3.B + /// svuint8_t svmla[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; MLA Zresult.B, Pg/M, Zop2.B, Zop3.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; MAD Zresult.B, Pg/M, Zop3.B, Zop1.B + /// MOVPRFX Zresult.B, Pg/Z, Zop3.B; MAD Zresult.B, Pg/M, Zop2.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLA ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, INS_OPTS_SCALABLE_B); + /// IF_SVE_FF_3A MLA .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right) => MultiplyAdd(addend, left, right); + + /// + /// svuint16_t svmla[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// MLA Ztied1.H, Pg/M, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; MLA Zresult.H, Pg/M, Zop2.H, Zop3.H + /// svuint16_t svmla[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// MLA Ztied1.H, Pg/M, Zop2.H, Zop3.H + /// MAD Ztied2.H, Pg/M, Zop3.H, Zop1.H + /// MAD Ztied3.H, Pg/M, Zop2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; MLA Zresult.H, Pg/M, Zop2.H, Zop3.H + /// svuint16_t svmla[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; MLA Zresult.H, Pg/M, Zop2.H, Zop3.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; MAD Zresult.H, Pg/M, Zop3.H, Zop1.H + /// MOVPRFX Zresult.H, Pg/Z, Zop3.H; MAD Zresult.H, Pg/M, Zop2.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLA ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, INS_OPTS_SCALABLE_B); + /// IF_SVE_FF_3A MLA .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right) => MultiplyAdd(addend, left, right); + + /// + /// svuint32_t svmla[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// MLA Ztied1.S, Pg/M, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; MLA Zresult.S, Pg/M, Zop2.S, Zop3.S + /// svuint32_t svmla[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// MLA Ztied1.S, Pg/M, Zop2.S, Zop3.S + /// MAD Ztied2.S, Pg/M, Zop3.S, Zop1.S + /// MAD Ztied3.S, Pg/M, Zop2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; MLA Zresult.S, Pg/M, Zop2.S, Zop3.S + /// svuint32_t svmla[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; MLA Zresult.S, Pg/M, Zop2.S, Zop3.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; MAD Zresult.S, Pg/M, Zop3.S, Zop1.S + /// MOVPRFX Zresult.S, Pg/Z, Zop3.S; MAD Zresult.S, Pg/M, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLA ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, INS_OPTS_SCALABLE_B); + /// IF_SVE_FF_3A MLA .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right) => MultiplyAdd(addend, left, right); + + /// + /// svuint64_t svmla[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// MLA Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; MLA Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svuint64_t svmla[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// MLA Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// MAD Ztied2.D, Pg/M, Zop3.D, Zop1.D + /// MAD Ztied3.D, Pg/M, Zop2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; MLA Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svuint64_t svmla[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; MLA Zresult.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; MAD Zresult.D, Pg/M, Zop3.D, Zop1.D + /// MOVPRFX Zresult.D, Pg/Z, Zop3.D; MAD Zresult.D, Pg/M, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLA ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_FF_3A MLA <Zda>.H, <Zn>.H, <Zm>.H[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FF_3B MLA <Zda>.S, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_FF_3C MLA <Zda>.D, <Zn>.D, <Zm>.D[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ulong> MultiplyAdd(Vector<ulong> addend, Vector<ulong> left, Vector<ulong> right) => MultiplyAdd(addend, left, right);
+
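MultiplyAdd maps to MLA and computes addend + left * right; MultiplySubtract, further below, is the MLS dual computing minuend - left * right with the same operand order. A one-line sketch with a software fallback (the helper name and the fallback path are assumptions; `Vector<T>` operator arithmetic gives the same lane-wise result):

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    internal static class SveMlaExample
    {
        // MLA: acc + a * b, falling back to plain vector arithmetic off-SVE.
        static Vector<uint> MulAcc(Vector<uint> acc, Vector<uint> a, Vector<uint> b)
            => Sve.IsSupported ? Sve.MultiplyAdd(acc, a, b) : acc + a * b;
    }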
+
+ /// MultiplyAddRotateComplex : Complex multiply-add with rotate
+
+ /// <summary>
+ /// svfloat32_t svcmla[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation)
+ /// FCMLA Ztied1.S, Pg/M, Zop2.S, Zop3.S, #imm_rotation
+ /// MOVPRFX Zresult, Zop1; FCMLA Zresult.S, Pg/M, Zop2.S, Zop3.S, #imm_rotation
+ /// svfloat32_t svcmla[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation)
+ /// FCMLA Ztied1.S, Pg/M, Zop2.S, Zop3.S, #imm_rotation
+ /// MOVPRFX Zresult, Zop1; FCMLA Zresult.S, Pg/M, Zop2.S, Zop3.S, #imm_rotation
+ /// svfloat32_t svcmla[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_rotation)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FCMLA Zresult.S, Pg/M, Zop2.S, Zop3.S, #imm_rotation
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GV_3A FCMLA <Zda>.S, <Zn>.S, <Zm>.S[<imm>], <const>
+ /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_GT_4A FCMLA <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>, <const>
+ /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P1, REG_V3, REG_V4, 0, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V0, REG_P2, REG_V1, REG_V5, 90, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P3, REG_V0, REG_V6, 180, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P3, REG_V0, REG_V6, 270, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<float> MultiplyAddRotateComplex(Vector<float> addend, Vector<float> left, Vector<float> right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation);
+
+ /// <summary>
+ /// svfloat64_t svcmla[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation)
+ /// FCMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D, #imm_rotation
+ /// MOVPRFX Zresult, Zop1; FCMLA Zresult.D, Pg/M, Zop2.D, Zop3.D, #imm_rotation
+ /// svfloat64_t svcmla[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation)
+ /// FCMLA Ztied1.D, Pg/M, Zop2.D, Zop3.D, #imm_rotation
+ /// MOVPRFX Zresult, Zop1; FCMLA Zresult.D, Pg/M, Zop2.D, Zop3.D, #imm_rotation
+ /// svfloat64_t svcmla[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2, svfloat64_t op3, uint64_t imm_rotation)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FCMLA Zresult.D, Pg/M, Zop2.D, Zop3.D, #imm_rotation
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GV_3A FCMLA <Zda>.S, <Zn>.S, <Zm>.S[<imm>], <const>
+ /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_GT_4A FCMLA <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>, <const>
+ /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P1, REG_V3, REG_V4, 0, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V0, REG_P2, REG_V1, REG_V5, 90, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P3, REG_V0, REG_V6, 180, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P3, REG_V0, REG_V6, 270, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<double> MultiplyAddRotateComplex(Vector<double> addend, Vector<double> left, Vector<double> right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation);
+
+
+ /// MultiplyAddRotateComplexBySelectedScalar : Complex multiply-add with rotate
+
+ /// <summary>
+ /// svfloat32_t svcmla_lane[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3, uint64_t imm_index, uint64_t imm_rotation)
+ /// FCMLA Ztied1.S, Zop2.S, Zop3.S[imm_index], #imm_rotation
+ /// MOVPRFX Zresult, Zop1; FCMLA Zresult.S, Zop2.S, Zop3.S[imm_index], #imm_rotation
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GV_3A FCMLA <Zda>.S, <Zn>.S, <Zm>.S[<imm>], <const>
+ /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_GT_4A FCMLA <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>, <const>
+ /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P1, REG_V3, REG_V4, 0, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V0, REG_P2, REG_V1, REG_V5, 90, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P3, REG_V0, REG_V6, 180, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P3, REG_V0, REG_V6, 270, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<float> MultiplyAddRotateComplexBySelectedScalar(Vector<float> addend, Vector<float> left, Vector<float> right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) => MultiplyAddRotateComplexBySelectedScalar(addend, left, right, rightIndex, rotation);
+
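FCMLA treats lane pairs as interleaved (real, imaginary) components, and a full complex multiply-accumulate takes two issues, rotation 0 followed by rotation 90. A sketch of that pattern; the assumption that the C# `rotation` argument encodes 0/1/2/3 for 0/90/180/270 degrees is inferred from the FCMLA immediates quoted above and is not stated by this patch:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    internal static class SveFcmlaExample
    {
        // Rotation 0:  acc.re += x.re * y.re;  acc.im += x.re * y.im.
        // Rotation 90: acc.re -= x.im * y.im;  acc.im += x.im * y.re.
        // Together they accumulate the full complex product x * y.
        static Vector<float> ComplexMulAcc(Vector<float> acc, Vector<float> x, Vector<float> y)
        {
            acc = Sve.MultiplyAddRotateComplex(acc, x, y, 0);
            acc = Sve.MultiplyAddRotateComplex(acc, x, y, 1);
            return acc;
        }
    }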
+ /// MultiplyBySelectedScalar : Multiply
+
+ /// <summary>
+ /// svfloat32_t svmul_lane[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm_index)
+ /// FMUL Zresult.S, Zop1.S, Zop2.S[imm_index]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HL_3A FMUL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmul, EA_SCALABLE, REG_V2, REG_P6, REG_V1, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_HK_3A FMUL <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmul, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_GX_3A FMUL <Zd>.S, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 2, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 3, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_GX_3B FMUL <Zd>.D, <Zn>.D, <Zm>.D[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V1, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V3, REG_V2, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_V4, REG_V10, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V7, REG_V6, REG_V15, 1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HM_2A FMUL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <const>
+ /// theEmitter->emitIns_R_R_F(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_P1, 0.5, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_F(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_P1, 2.0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<float> MultiplyBySelectedScalar(Vector<float> left, Vector<float> right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);
+
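MultiplyBySelectedScalar is the indexed-element FMUL: every lane of `left` is scaled by a single broadcast lane of `right`, and the index must be a JIT-time constant (hence `[ConstantExpected]`). A minimal sketch with a hypothetical helper name:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    internal static class SveLaneMulExample
    {
        // Scale every element of 'left' by lane 0 of 'right' (FMUL by indexed element).
        static Vector<float> ScaleByLane0(Vector<float> left, Vector<float> right)
            => Sve.MultiplyBySelectedScalar(left, right, 0);
    }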
+ /// <summary>
+ /// svfloat64_t svmul_lane[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm_index)
+ /// FMUL Zresult.D, Zop1.D, Zop2.D[imm_index]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HL_3A FMUL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmul, EA_SCALABLE, REG_V2, REG_P6, REG_V1, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_HK_3A FMUL <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmul, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_GX_3A FMUL <Zd>.S, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 2, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 3, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_GX_3B FMUL <Zd>.D, <Zn>.D, <Zm>.D[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V1, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V3, REG_V2, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_V4, REG_V10, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V7, REG_V6, REG_V15, 1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_HM_2A FMUL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <const>
+ /// theEmitter->emitIns_R_R_F(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_P1, 0.5, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_F(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_P1, 2.0, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<double> MultiplyBySelectedScalar(Vector<double> left, Vector<double> right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);
+
+
+ /// MultiplyExtended : Multiply extended (∞×0=2)
+
+ /// <summary>
+ /// svfloat32_t svmulx[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// FMULX Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ /// MOVPRFX Zresult, Zop1; FMULX Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// svfloat32_t svmulx[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// FMULX Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ /// FMULX Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+ /// MOVPRFX Zresult, Zop1; FMULX Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// svfloat32_t svmulx[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FMULX Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; FMULX Zresult.S, Pg/M, Zresult.S, Zop1.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HL_3A FMULX <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmulx, EA_SCALABLE, REG_V3, REG_P7, REG_V0, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<float> MultiplyExtended(Vector<float> left, Vector<float> right) => MultiplyExtended(left, right);
+
+ /// <summary>
+ /// svfloat64_t svmulx[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// FMULX Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ /// MOVPRFX Zresult, Zop1; FMULX Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// svfloat64_t svmulx[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// FMULX Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ /// FMULX Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+ /// MOVPRFX Zresult, Zop1; FMULX Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// svfloat64_t svmulx[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FMULX Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; FMULX Zresult.D, Pg/M, Zresult.D, Zop1.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HL_3A FMULX <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmulx, EA_SCALABLE, REG_V3, REG_P7, REG_V0, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<double> MultiplyExtended(Vector<double> left, Vector<double> right) => MultiplyExtended(left, right);
+
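MultiplyExtended (FMULX) behaves exactly like an ordinary multiply except on the special case (±0) × (±∞), which yields ±2.0 instead of NaN; that quirk is what makes it useful inside Newton-style reciprocal and reciprocal-square-root steps. A minimal sketch (helper name assumed; note the plain-multiply fallback differs from FMULX on that one special case):

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    internal static class SveFmulxExample
    {
        // FMULX: as FMUL, except (±0) × (±Inf) returns ±2.0 rather than NaN.
        // The fallback a * b does NOT reproduce that special case.
        static Vector<double> MulExtended(Vector<double> a, Vector<double> b)
            => Sve.IsSupported ? Sve.MultiplyExtended(a, b) : a * b;
    }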
+
+
+ /// MultiplySubtract : Multiply-subtract, minuend first
+
+ /// <summary>
+ /// svint8_t svmls[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ /// MLS Ztied1.B, Pg/M, Zop2.B, Zop3.B
+ /// MOVPRFX Zresult, Zop1; MLS Zresult.B, Pg/M, Zop2.B, Zop3.B
+ /// svint8_t svmls[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ /// MLS Ztied1.B, Pg/M, Zop2.B, Zop3.B
+ /// MSB Ztied2.B, Pg/M, Zop3.B, Zop1.B
+ /// MSB Ztied3.B, Pg/M, Zop2.B, Zop1.B
+ /// MOVPRFX Zresult, Zop1; MLS Zresult.B, Pg/M, Zop2.B, Zop3.B
+ /// svint8_t svmls[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2, svint8_t op3)
+ /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; MLS Zresult.B, Pg/M, Zop2.B, Zop3.B
+ /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; MSB Zresult.B, Pg/M, Zop3.B, Zop1.B
+ /// MOVPRFX Zresult.B, Pg/Z, Zop3.B; MSB Zresult.B, Pg/M, Zop2.B, Zop1.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AR_4A MLS <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FF_3A MLS <Zda>.H, <Zn>.H, <Zm>.H[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FF_3B MLS <Zda>.S, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_FF_3C MLS <Zda>.D, <Zn>.D, <Zm>.D[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<sbyte> MultiplySubtract(Vector<sbyte> minuend, Vector<sbyte> left, Vector<sbyte> right) => MultiplySubtract(minuend, left, right);
+
+ /// <summary>
+ /// svint16_t svmls[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ /// MLS Ztied1.H, Pg/M, Zop2.H, Zop3.H
+ /// MOVPRFX Zresult, Zop1; MLS Zresult.H, Pg/M, Zop2.H, Zop3.H
+ /// svint16_t svmls[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ /// MLS Ztied1.H, Pg/M, Zop2.H, Zop3.H
+ /// MSB Ztied2.H, Pg/M, Zop3.H, Zop1.H
+ /// MSB Ztied3.H, Pg/M, Zop2.H, Zop1.H
+ /// MOVPRFX Zresult, Zop1; MLS Zresult.H, Pg/M, Zop2.H, Zop3.H
+ /// svint16_t svmls[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2, svint16_t op3)
+ /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; MLS Zresult.H, Pg/M, Zop2.H, Zop3.H
+ /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; MSB Zresult.H, Pg/M, Zop3.H, Zop1.H
+ /// MOVPRFX Zresult.H, Pg/Z, Zop3.H; MSB Zresult.H, Pg/M, Zop2.H, Zop1.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AR_4A MLS <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3A MLS .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + /// + /// svint32_t svmls[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) + /// MLS Ztied1.S, Pg/M, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; MLS Zresult.S, Pg/M, Zop2.S, Zop3.S + /// svint32_t svmls[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) + /// MLS Ztied1.S, Pg/M, Zop2.S, Zop3.S + /// MSB Ztied2.S, Pg/M, Zop3.S, Zop1.S + /// MSB Ztied3.S, Pg/M, Zop2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; MLS Zresult.S, Pg/M, Zop2.S, Zop3.S + /// svint32_t svmls[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2, svint32_t op3) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; MLS Zresult.S, Pg/M, Zop2.S, Zop3.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; MSB Zresult.S, Pg/M, Zop3.S, Zop1.S + /// MOVPRFX Zresult.S, Pg/Z, Zop3.S; MSB Zresult.S, Pg/M, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLS ., /M, ., . 
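+ ///
+ /// Example (illustrative sketch, not part of the generated surface; assumes the
+ /// Vector&lt;int&gt; overload described above):
+ ///   Vector<int> r = Sve.MultiplySubtract(acc, a, b); // per element: acc[i] - a[i] * b[i], minuend first
+ ///
+ /// IF_SVE_AR_4A (continued):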
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3A MLS .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + /// + /// svint64_t svmls[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) + /// MLS Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; MLS Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svint64_t svmls[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) + /// MLS Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// MSB Ztied2.D, Pg/M, Zop3.D, Zop1.D + /// MSB Ztied3.D, Pg/M, Zop2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; MLS Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svint64_t svmls[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2, svint64_t op3) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; MLS Zresult.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; MSB Zresult.D, Pg/M, Zop3.D, Zop1.D + /// MOVPRFX Zresult.D, Pg/Z, Zop3.D; MSB Zresult.D, Pg/M, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLS ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3A MLS .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + /// + /// svuint8_t svmls[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// MLS Ztied1.B, Pg/M, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; MLS Zresult.B, Pg/M, Zop2.B, Zop3.B + /// svuint8_t svmls[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// MLS Ztied1.B, Pg/M, Zop2.B, Zop3.B + /// MSB Ztied2.B, Pg/M, Zop3.B, Zop1.B + /// MSB Ztied3.B, Pg/M, Zop2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; MLS Zresult.B, Pg/M, Zop2.B, Zop3.B + /// svuint8_t svmls[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; MLS Zresult.B, Pg/M, Zop2.B, Zop3.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; MSB Zresult.B, Pg/M, Zop3.B, Zop1.B + /// MOVPRFX Zresult.B, Pg/Z, Zop3.B; MSB Zresult.B, Pg/M, Zop2.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLS ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3A MLS .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + /// + /// svuint16_t svmls[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// MLS Ztied1.H, Pg/M, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; MLS Zresult.H, Pg/M, Zop2.H, Zop3.H + /// svuint16_t svmls[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// MLS Ztied1.H, Pg/M, Zop2.H, Zop3.H + /// MSB Ztied2.H, Pg/M, Zop3.H, Zop1.H + /// MSB Ztied3.H, Pg/M, Zop2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; MLS Zresult.H, Pg/M, Zop2.H, Zop3.H + /// svuint16_t svmls[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; MLS Zresult.H, Pg/M, Zop2.H, Zop3.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; MSB Zresult.H, Pg/M, Zop3.H, Zop1.H + /// MOVPRFX Zresult.H, Pg/Z, Zop3.H; MSB Zresult.H, Pg/M, Zop2.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLS ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3A MLS .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + /// + /// svuint32_t svmls[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// MLS Ztied1.S, Pg/M, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; MLS Zresult.S, Pg/M, Zop2.S, Zop3.S + /// svuint32_t svmls[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// MLS Ztied1.S, Pg/M, Zop2.S, Zop3.S + /// MSB Ztied2.S, Pg/M, Zop3.S, Zop1.S + /// MSB Ztied3.S, Pg/M, Zop2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; MLS Zresult.S, Pg/M, Zop2.S, Zop3.S + /// svuint32_t svmls[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; MLS Zresult.S, Pg/M, Zop2.S, Zop3.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; MSB Zresult.S, Pg/M, Zop3.S, Zop1.S + /// MOVPRFX Zresult.S, Pg/Z, Zop3.S; MSB Zresult.S, Pg/M, Zop2.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLS ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3A MLS .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + /// + /// svuint64_t svmls[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// MLS Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; MLS Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svuint64_t svmls[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// MLS Ztied1.D, Pg/M, Zop2.D, Zop3.D + /// MSB Ztied2.D, Pg/M, Zop3.D, Zop1.D + /// MSB Ztied3.D, Pg/M, Zop2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; MLS Zresult.D, Pg/M, Zop2.D, Zop3.D + /// svuint64_t svmls[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; MLS Zresult.D, Pg/M, Zop2.D, Zop3.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; MSB Zresult.D, Pg/M, Zop3.D, Zop1.D + /// MOVPRFX Zresult.D, Pg/Z, Zop3.D; MSB Zresult.D, Pg/M, Zop2.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLS ., /M, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3A MLS .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right) => MultiplySubtract(minuend, left, right); + + + + + /// Negate : Negate + + /// + /// svint8_t svneg[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) + /// NEG Ztied.B, Pg/M, Zop.B + /// MOVPRFX Zresult, Zinactive; NEG Zresult.B, Pg/M, Zop.B + /// svint8_t svneg[_s8]_x(svbool_t pg, svint8_t op) + /// NEG Ztied.B, Pg/M, Ztied.B + /// MOVPRFX Zresult, Zop; NEG Zresult.B, Pg/M, Zop.B + /// svint8_t svneg[_s8]_z(svbool_t pg, svint8_t op) + /// MOVPRFX Zresult.B, Pg/Z, Zop.B; NEG Zresult.B, Pg/M, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A NEG ., /M, . 
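+ ///
+ /// Example (illustrative sketch, not part of the generated surface):
+ ///   Vector<sbyte> n = Sve.Negate(v); // per element: -v[i]
+ ///
+ /// IF_SVE_AQ_3A (continued):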
+ /// theEmitter->emitIns_R_R_R(INS_sve_neg, EA_SCALABLE, REG_V23, REG_P0, REG_V8, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Negate(Vector value) => Negate(value); + + /// + /// svint16_t svneg[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// NEG Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; NEG Zresult.H, Pg/M, Zop.H + /// svint16_t svneg[_s16]_x(svbool_t pg, svint16_t op) + /// NEG Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; NEG Zresult.H, Pg/M, Zop.H + /// svint16_t svneg[_s16]_z(svbool_t pg, svint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; NEG Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A NEG ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_neg, EA_SCALABLE, REG_V23, REG_P0, REG_V8, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Negate(Vector value) => Negate(value); + + /// + /// svint32_t svneg[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// NEG Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; NEG Zresult.S, Pg/M, Zop.S + /// svint32_t svneg[_s32]_x(svbool_t pg, svint32_t op) + /// NEG Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; NEG Zresult.S, Pg/M, Zop.S + /// svint32_t svneg[_s32]_z(svbool_t pg, svint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; NEG Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A NEG ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_neg, EA_SCALABLE, REG_V23, REG_P0, REG_V8, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Negate(Vector value) => Negate(value); + + /// + /// svint64_t svneg[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// NEG Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; NEG Zresult.D, Pg/M, Zop.D + /// svint64_t svneg[_s64]_x(svbool_t pg, svint64_t op) + /// NEG Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; NEG Zresult.D, Pg/M, Zop.D + /// svint64_t svneg[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; NEG Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A NEG ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_neg, EA_SCALABLE, REG_V23, REG_P0, REG_V8, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Negate(Vector value) => Negate(value); + + /// + /// svfloat32_t svneg[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// FNEG Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; FNEG Zresult.S, Pg/M, Zop.S + /// svfloat32_t svneg[_f32]_x(svbool_t pg, svfloat32_t op) + /// FNEG Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; FNEG Zresult.S, Pg/M, Zop.S + /// svfloat32_t svneg[_f32]_z(svbool_t pg, svfloat32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FNEG Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AP_3A FNEG ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_fneg, EA_SCALABLE, REG_V26, REG_P5, REG_V5, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Negate(Vector value) => Negate(value); + + /// + /// svfloat64_t svneg[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// FNEG Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; FNEG Zresult.D, Pg/M, Zop.D + /// svfloat64_t svneg[_f64]_x(svbool_t pg, svfloat64_t op) + /// FNEG Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; FNEG Zresult.D, Pg/M, Zop.D + /// svfloat64_t svneg[_f64]_z(svbool_t pg, svfloat64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FNEG Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AP_3A FNEG ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_fneg, EA_SCALABLE, REG_V26, REG_P5, REG_V5, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Negate(Vector value) => Negate(value); + + + + + /// Not : Bitwise invert + + /// + /// svint8_t svnot[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) + /// NOT Ztied.B, Pg/M, Zop.B + /// MOVPRFX Zresult, Zinactive; NOT Zresult.B, Pg/M, Zop.B + /// svint8_t svnot[_s8]_x(svbool_t pg, svint8_t op) + /// NOT Ztied.B, Pg/M, Ztied.B + /// MOVPRFX Zresult, Zop; NOT Zresult.B, Pg/M, Zop.B + /// svint8_t svnot[_s8]_z(svbool_t pg, svint8_t op) + /// MOVPRFX Zresult.B, Pg/Z, Zop.B; NOT Zresult.B, Pg/M, Zop.B + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// EOR Presult.B, Pg/Z, Pop.B, Pg.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_P9, REG_P5, REG_P15, INS_OPTS_SCALABLE_B); /* NOT .B, /Z, .B */ + /// IF_SVE_AP_3A NOT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_V25, REG_P6, REG_V6, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Not(Vector value) => Not(value); + + /// + /// svint16_t svnot[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// NOT Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; NOT Zresult.H, Pg/M, Zop.H + /// svint16_t svnot[_s16]_x(svbool_t pg, svint16_t op) + /// NOT Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; NOT Zresult.H, Pg/M, Zop.H + /// svint16_t svnot[_s16]_z(svbool_t pg, svint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; NOT Zresult.H, Pg/M, Zop.H + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// EOR Presult.B, Pg/Z, Pop.B, Pg.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_P9, REG_P5, REG_P15, INS_OPTS_SCALABLE_B); /* NOT .B, /Z, .B */ + /// IF_SVE_AP_3A NOT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_V25, REG_P6, REG_V6, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Not(Vector value) => Not(value); + + /// + /// svint32_t svnot[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// NOT Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; NOT Zresult.S, Pg/M, Zop.S + /// svint32_t svnot[_s32]_x(svbool_t pg, svint32_t op) + /// NOT Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; NOT Zresult.S, Pg/M, Zop.S + /// svint32_t svnot[_s32]_z(svbool_t pg, svint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; NOT Zresult.S, Pg/M, Zop.S + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// EOR Presult.B, Pg/Z, Pop.B, Pg.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_P9, REG_P5, REG_P15, INS_OPTS_SCALABLE_B); /* NOT .B, /Z, .B */ + /// IF_SVE_AP_3A NOT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_V25, REG_P6, REG_V6, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Not(Vector value) => Not(value); + + /// + /// svint64_t svnot[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// NOT Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; NOT Zresult.D, Pg/M, Zop.D + /// svint64_t svnot[_s64]_x(svbool_t pg, svint64_t op) + /// NOT Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; NOT Zresult.D, Pg/M, Zop.D + /// svint64_t svnot[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; NOT Zresult.D, Pg/M, Zop.D + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// EOR Presult.B, Pg/Z, Pop.B, Pg.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_P9, REG_P5, REG_P15, INS_OPTS_SCALABLE_B); /* NOT .B, /Z, .B */ + /// IF_SVE_AP_3A NOT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_V25, REG_P6, REG_V6, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Not(Vector value) => Not(value); + + /// + /// svuint8_t svnot[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op) + /// NOT Ztied.B, Pg/M, Zop.B + /// MOVPRFX Zresult, Zinactive; NOT Zresult.B, Pg/M, Zop.B + /// svuint8_t svnot[_u8]_x(svbool_t pg, svuint8_t op) + /// NOT Ztied.B, Pg/M, Ztied.B + /// MOVPRFX Zresult, Zop; NOT Zresult.B, Pg/M, Zop.B + /// svuint8_t svnot[_u8]_z(svbool_t pg, svuint8_t op) + /// MOVPRFX Zresult.B, Pg/Z, Zop.B; NOT Zresult.B, Pg/M, Zop.B + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// EOR Presult.B, Pg/Z, Pop.B, Pg.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_P9, REG_P5, REG_P15, INS_OPTS_SCALABLE_B); /* NOT .B, /Z, .B */ + /// IF_SVE_AP_3A NOT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_V25, REG_P6, REG_V6, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Not(Vector value) => Not(value); + + /// + /// svuint16_t svnot[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) + /// NOT Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; NOT Zresult.H, Pg/M, Zop.H + /// svuint16_t svnot[_u16]_x(svbool_t pg, svuint16_t op) + /// NOT Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; NOT Zresult.H, Pg/M, Zop.H + /// svuint16_t svnot[_u16]_z(svbool_t pg, svuint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; NOT Zresult.H, Pg/M, Zop.H + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// EOR Presult.B, Pg/Z, Pop.B, Pg.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_P9, REG_P5, REG_P15, INS_OPTS_SCALABLE_B); /* NOT .B, /Z, .B */ + /// IF_SVE_AP_3A NOT ., /M, . 
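+ ///
+ /// Example (illustrative sketch, not part of the generated surface):
+ ///   Vector<ushort> inv = Sve.Not(v); // per element: ~v[i]
+ ///
+ /// IF_SVE_AP_3A (continued):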
+ /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_V25, REG_P6, REG_V6, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Not(Vector value) => Not(value); + + /// + /// svuint32_t svnot[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// NOT Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; NOT Zresult.S, Pg/M, Zop.S + /// svuint32_t svnot[_u32]_x(svbool_t pg, svuint32_t op) + /// NOT Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; NOT Zresult.S, Pg/M, Zop.S + /// svuint32_t svnot[_u32]_z(svbool_t pg, svuint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; NOT Zresult.S, Pg/M, Zop.S + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// EOR Presult.B, Pg/Z, Pop.B, Pg.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_P9, REG_P5, REG_P15, INS_OPTS_SCALABLE_B); /* NOT .B, /Z, .B */ + /// IF_SVE_AP_3A NOT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_V25, REG_P6, REG_V6, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Not(Vector value) => Not(value); + + /// + /// svuint64_t svnot[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// NOT Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; NOT Zresult.D, Pg/M, Zop.D + /// svuint64_t svnot[_u64]_x(svbool_t pg, svuint64_t op) + /// NOT Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; NOT Zresult.D, Pg/M, Zop.D + /// svuint64_t svnot[_u64]_z(svbool_t pg, svuint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; NOT Zresult.D, Pg/M, Zop.D + /// svbool_t svnot[_b]_z(svbool_t pg, svbool_t op) + /// EOR Presult.B, Pg/Z, Pop.B, Pg.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_P9, REG_P5, REG_P15, INS_OPTS_SCALABLE_B); /* NOT .B, /Z, .B */ + /// IF_SVE_AP_3A NOT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_not, EA_SCALABLE, REG_V25, REG_P6, REG_V6, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Not(Vector value) => Not(value); + + + /// Or : Bitwise inclusive OR + + /// + /// svint8_t svorr[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// ORR Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; ORR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svorr[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// ORR Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// ORR Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// ORR Zresult.D, Zop1.D, Zop2.D + /// svint8_t svorr[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; ORR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; ORR Zresult.B, Pg/M, Zresult.B, Zop1.B + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A ORR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V29, REG_P7, REG_V31, INS_OPTS_SCALABLE_D); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_orr, EA_SCALABLE, REG_P0, REG_P0, REG_P15, REG_P12, INS_OPTS_SCALABLE_B); /* ORR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A ORR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + /// + /// svint16_t svorr[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// ORR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; ORR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svorr[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// ORR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// ORR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// ORR Zresult.D, Zop1.D, Zop2.D + /// svint16_t svorr[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; ORR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; ORR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A ORR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V29, REG_P7, REG_V31, INS_OPTS_SCALABLE_D); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_orr, EA_SCALABLE, REG_P0, REG_P0, REG_P15, REG_P12, INS_OPTS_SCALABLE_B); /* ORR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A ORR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + /// + /// svint32_t svorr[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// ORR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; ORR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svorr[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// ORR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// ORR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// ORR Zresult.D, Zop1.D, Zop2.D + /// svint32_t svorr[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; ORR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; ORR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A ORR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V29, REG_P7, REG_V31, INS_OPTS_SCALABLE_D); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_orr, EA_SCALABLE, REG_P0, REG_P0, REG_P15, REG_P12, INS_OPTS_SCALABLE_B); /* ORR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A ORR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + /// + /// svint64_t svorr[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// ORR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; ORR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svorr[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// ORR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// ORR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// ORR Zresult.D, Zop1.D, Zop2.D + /// svint64_t svorr[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; ORR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; ORR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A ORR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V29, REG_P7, REG_V31, INS_OPTS_SCALABLE_D); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_orr, EA_SCALABLE, REG_P0, REG_P0, REG_P15, REG_P12, INS_OPTS_SCALABLE_B); /* ORR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A ORR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + /// + /// svuint8_t svorr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// ORR Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; ORR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svorr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// ORR Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// ORR Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// ORR Zresult.D, Zop1.D, Zop2.D + /// svuint8_t svorr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; ORR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; ORR Zresult.B, Pg/M, Zresult.B, Zop1.B + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A ORR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V29, REG_P7, REG_V31, INS_OPTS_SCALABLE_D); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
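+ ///
+ /// Example (illustrative sketch, not part of the generated surface):
+ ///   Vector<byte> u = Sve.Or(a, b); // per element: a[i] | b[i]
+ ///
+ /// IF_SVE_CZ_4A (continued):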
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_orr, EA_SCALABLE, REG_P0, REG_P0, REG_P15, REG_P12, INS_OPTS_SCALABLE_B); /* ORR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A ORR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + /// + /// svuint16_t svorr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// ORR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; ORR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svorr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// ORR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// ORR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// ORR Zresult.D, Zop1.D, Zop2.D + /// svuint16_t svorr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; ORR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; ORR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A ORR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V29, REG_P7, REG_V31, INS_OPTS_SCALABLE_D); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_orr, EA_SCALABLE, REG_P0, REG_P0, REG_P15, REG_P12, INS_OPTS_SCALABLE_B); /* ORR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A ORR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + /// + /// svuint32_t svorr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// ORR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; ORR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svorr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// ORR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// ORR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// ORR Zresult.D, Zop1.D, Zop2.D + /// svuint32_t svorr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; ORR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; ORR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A ORR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V29, REG_P7, REG_V31, INS_OPTS_SCALABLE_D); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_orr, EA_SCALABLE, REG_P0, REG_P0, REG_P15, REG_P12, INS_OPTS_SCALABLE_B); /* ORR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A ORR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + /// + /// svuint64_t svorr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// ORR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; ORR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svorr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// ORR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// ORR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// ORR Zresult.D, Zop1.D, Zop2.D + /// svuint64_t svorr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; ORR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; ORR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// svbool_t svorr[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A ORR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V29, REG_P7, REG_V31, INS_OPTS_SCALABLE_D); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_orr, EA_SCALABLE, REG_P0, REG_P0, REG_P15, REG_P12, INS_OPTS_SCALABLE_B); /* ORR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A ORR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_orr, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Or(Vector left, Vector right) => Or(left, right); + + + /// OrAcross : Bitwise inclusive OR reduction to scalar + + /// + /// int8_t svorv[_s8](svbool_t pg, svint8_t op) + /// ORV Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ORV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_4BYTE, REG_V2, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_8BYTE, REG_V3, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + /// + /// int16_t svorv[_s16](svbool_t pg, svint16_t op) + /// ORV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ORV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_4BYTE, REG_V2, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_8BYTE, REG_V3, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + /// + /// int32_t svorv[_s32](svbool_t pg, svint32_t op) + /// ORV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ORV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_4BYTE, REG_V2, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_8BYTE, REG_V3, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + /// + /// int64_t svorv[_s64](svbool_t pg, svint64_t op) + /// ORV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ORV , , . 
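+ ///
+ /// Example (illustrative sketch, not part of the generated surface; assumes the
+ /// ORV reduction result lands in element 0 of the destination vector):
+ ///   long bits = Sve.OrAcross(v)[0]; // bitwise OR of all active elements
+ ///
+ /// IF_SVE_AF_3A (continued):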
+ /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_4BYTE, REG_V2, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_8BYTE, REG_V3, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + /// + /// uint8_t svorv[_u8](svbool_t pg, svuint8_t op) + /// ORV Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ORV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_4BYTE, REG_V2, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_8BYTE, REG_V3, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + /// + /// uint16_t svorv[_u16](svbool_t pg, svuint16_t op) + /// ORV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ORV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_4BYTE, REG_V2, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_8BYTE, REG_V3, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + /// + /// uint32_t svorv[_u32](svbool_t pg, svuint32_t op) + /// ORV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ORV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_4BYTE, REG_V2, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_8BYTE, REG_V3, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + /// + /// uint64_t svorv[_u64](svbool_t pg, svuint64_t op) + /// ORV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AF_3A ORV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_4BYTE, REG_V2, REG_P2, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_orv, EA_8BYTE, REG_V3, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrAcross(Vector value) => OrAcross(value); + + + /// OrNot : Bitwise NOR + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORN Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nor, EA_SCALABLE, REG_P5, REG_P13, REG_P10, REG_P7, INS_OPTS_SCALABLE_B); /* NOR .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORN Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_nor, EA_SCALABLE, REG_P5, REG_P13, REG_P10, REG_P7, INS_OPTS_SCALABLE_B); /* NOR .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORN Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nor, EA_SCALABLE, REG_P5, REG_P13, REG_P10, REG_P7, INS_OPTS_SCALABLE_B); /* NOR .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORN Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nor, EA_SCALABLE, REG_P5, REG_P13, REG_P10, REG_P7, INS_OPTS_SCALABLE_B); /* NOR .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORN Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nor, EA_SCALABLE, REG_P5, REG_P13, REG_P10, REG_P7, INS_OPTS_SCALABLE_B); /* NOR .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORN Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nor, EA_SCALABLE, REG_P5, REG_P13, REG_P10, REG_P7, INS_OPTS_SCALABLE_B); /* NOR .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORN Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nor, EA_SCALABLE, REG_P5, REG_P13, REG_P10, REG_P7, INS_OPTS_SCALABLE_B); /* NOR .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + /// + /// svbool_t svnor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// NOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// svbool_t svorn[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// ORN Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
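+ ///
+ /// Example (illustrative sketch, not part of the generated surface; assumes the
+ /// ORN form suggested by the name, i.e. the second operand is inverted):
+ ///   Vector<uint> r = Sve.OrNot(a, b); // per element: a[i] | ~b[i]
+ ///
+ /// IF_SVE_CZ_4A (continued):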
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_nor, EA_SCALABLE, REG_P5, REG_P13, REG_P10, REG_P7, INS_OPTS_SCALABLE_B); /* NOR .B, /Z, .B, .B */ + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector OrNot(Vector left, Vector right) => OrNot(left, right); + + + /// PopCount : Count nonzero bits + + /// + /// svuint8_t svcnt[_s8]_m(svuint8_t inactive, svbool_t pg, svint8_t op) + /// CNT Ztied.B, Pg/M, Zop.B + /// MOVPRFX Zresult, Zinactive; CNT Zresult.B, Pg/M, Zop.B + /// svuint8_t svcnt[_s8]_x(svbool_t pg, svint8_t op) + /// CNT Ztied.B, Pg/M, Ztied.B + /// MOVPRFX Zresult, Zop; CNT Zresult.B, Pg/M, Zop.B + /// svuint8_t svcnt[_s8]_z(svbool_t pg, svint8_t op) + /// MOVPRFX Zresult.B, Pg/Z, Zop.B; CNT Zresult.B, Pg/M, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnt, EA_SCALABLE, REG_V28, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint8_t svcnt[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op) + /// CNT Ztied.B, Pg/M, Zop.B + /// MOVPRFX Zresult, Zinactive; CNT Zresult.B, Pg/M, Zop.B + /// svuint8_t svcnt[_u8]_x(svbool_t pg, svuint8_t op) + /// CNT Ztied.B, Pg/M, Ztied.B + /// MOVPRFX Zresult, Zop; CNT Zresult.B, Pg/M, Zop.B + /// svuint8_t svcnt[_u8]_z(svbool_t pg, svuint8_t op) + /// MOVPRFX Zresult.B, Pg/Z, Zop.B; CNT Zresult.B, Pg/M, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnt, EA_SCALABLE, REG_V28, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint16_t svcnt[_s16]_m(svuint16_t inactive, svbool_t pg, svint16_t op) + /// CNT Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; CNT Zresult.H, Pg/M, Zop.H + /// svuint16_t svcnt[_s16]_x(svbool_t pg, svint16_t op) + /// CNT Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; CNT Zresult.H, Pg/M, Zop.H + /// svuint16_t svcnt[_s16]_z(svbool_t pg, svint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; CNT Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnt, EA_SCALABLE, REG_V28, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint16_t svcnt[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) + /// CNT Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; CNT Zresult.H, Pg/M, Zop.H + /// svuint16_t svcnt[_u16]_x(svbool_t pg, svuint16_t op) + /// CNT Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; CNT Zresult.H, Pg/M, Zop.H + /// svuint16_t svcnt[_u16]_z(svbool_t pg, svuint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; CNT Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNT ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_cnt, EA_SCALABLE, REG_V28, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint32_t svcnt[_s32]_m(svuint32_t inactive, svbool_t pg, svint32_t op) + /// CNT Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; CNT Zresult.S, Pg/M, Zop.S + /// svuint32_t svcnt[_s32]_x(svbool_t pg, svint32_t op) + /// CNT Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; CNT Zresult.S, Pg/M, Zop.S + /// svuint32_t svcnt[_s32]_z(svbool_t pg, svint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; CNT Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnt, EA_SCALABLE, REG_V28, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint32_t svcnt[_f32]_m(svuint32_t inactive, svbool_t pg, svfloat32_t op) + /// CNT Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; CNT Zresult.S, Pg/M, Zop.S + /// svuint32_t svcnt[_f32]_x(svbool_t pg, svfloat32_t op) + /// CNT Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; CNT Zresult.S, Pg/M, Zop.S + /// svuint32_t svcnt[_f32]_z(svbool_t pg, svfloat32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; CNT Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnt, EA_SCALABLE, REG_V28, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint32_t svcnt[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// CNT Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; CNT Zresult.S, Pg/M, Zop.S + /// svuint32_t svcnt[_u32]_x(svbool_t pg, svuint32_t op) + /// CNT Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; CNT Zresult.S, Pg/M, Zop.S + /// svuint32_t svcnt[_u32]_z(svbool_t pg, svuint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; CNT Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnt, EA_SCALABLE, REG_V28, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint64_t svcnt[_s64]_m(svuint64_t inactive, svbool_t pg, svint64_t op) + /// CNT Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; CNT Zresult.D, Pg/M, Zop.D + /// svuint64_t svcnt[_s64]_x(svbool_t pg, svint64_t op) + /// CNT Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; CNT Zresult.D, Pg/M, Zop.D + /// svuint64_t svcnt[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; CNT Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNT ., /M, . 
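+ ///
+ /// Example (illustrative sketch, not part of the generated surface; per the svcnt
+ /// signatures above, the result element type is the unsigned counterpart of the input):
+ ///   Vector<ulong> counts = Sve.PopCount(v); // bits set in each 64-bit element
+ ///
+ /// IF_SVE_AP_3A (continued):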
+ /// theEmitter->emitIns_R_R_R(INS_sve_cnt, EA_SCALABLE, REG_V28, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint64_t svcnt[_f64]_m(svuint64_t inactive, svbool_t pg, svfloat64_t op) + /// CNT Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; CNT Zresult.D, Pg/M, Zop.D + /// svuint64_t svcnt[_f64]_x(svbool_t pg, svfloat64_t op) + /// CNT Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; CNT Zresult.D, Pg/M, Zop.D + /// svuint64_t svcnt[_f64]_z(svbool_t pg, svfloat64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; CNT Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnt, EA_SCALABLE, REG_V28, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + /// + /// svuint64_t svcnt[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// CNT Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; CNT Zresult.D, Pg/M, Zop.D + /// svuint64_t svcnt[_u64]_x(svbool_t pg, svuint64_t op) + /// CNT Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; CNT Zresult.D, Pg/M, Zop.D + /// svuint64_t svcnt[_u64]_z(svbool_t pg, svuint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; CNT Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnt, EA_SCALABLE, REG_V28, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + + /// PrefetchBytes : Prefetch bytes + + /// + /// void svprfb(svbool_t pg, const void *base, enum svprfop op) + /// PRFB op, Pg, [Xarray, Xindex] + /// PRFB op, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFB , , [, .S, ] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1STRM, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL2KEEP, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL2STRM, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL3KEEP, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL3STRM, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PSTL1KEEP, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PSTL1STRM, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PSTL2KEEP, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PSTL2STRM, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PSTL3KEEP, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PSTL3STRM, 
REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_CONST6, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_CONST7, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_CONST14, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_CONST15, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// IF_SVE_HY_3A_A PRFB , , [, .D, ] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// IF_SVE_HY_3B PRFB , , [, .D] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R1, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_HZ_2A_B PRFB , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_V2, 31, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFB , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P2, REG_R3, -32); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P2, REG_R3, 17); + /// IF_SVE_IB_3A PRFB , , [, ] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfb, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_R1, REG_R2); + /// + public static unsafe void PrefetchBytes(Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType) => PrefetchBytes(mask, address, prefetchType); + + + /// PrefetchInt16 : Prefetch halfwords + + /// + /// void svprfh(svbool_t pg, const void *base, enum svprfop op) + /// PRFH op, Pg, [Xarray, Xindex, LSL #1] + /// PRFH op, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_HY_3A PRFH , , [, .S, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3A_A PRFH , , [, .D, #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P7, REG_R8, REG_V9, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HY_3B PRFH , , [, .D, LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R5, REG_V4, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HZ_2A_B PRFH , , [.S{, #}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P0, REG_V4, 62, INS_OPTS_SCALABLE_D); + /// IF_SVE_IA_2A PRFH , , [{, #, MUL VL}] + /// theEmitter->emitIns_PRFOP_R_R_I(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P1, REG_R2, 0); + /// IF_SVE_IB_3A PRFH , , [, , LSL #1] + /// theEmitter->emitIns_PRFOP_R_R_R(INS_sve_prfh, EA_SCALABLE, SVE_PRFOP_PLDL1KEEP, REG_P6, REG_R7, REG_R8, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void PrefetchInt16(Vector mask, void* address, [ConstantExpected] SvePrefetchType prefetchType) => PrefetchInt16(mask, address, prefetchType); + + + /// PrefetchInt32 : Prefetch words + + /// + /// void svprfw(svbool_t pg, const void *base, enum svprfop op) + /// PRFW op, Pg, [Xarray, Xindex, LSL #2] + /// PRFW op, Pg, [Xbase, #0, MUL 
+ /// ReciprocalEstimate : Reciprocal estimate
+
+ ///
+ /// svfloat32_t svrecpe[_f32](svfloat32_t op)
+ /// FRECPE Zresult.S, Zop.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HF_2A FRECPE <Zd>.<T>, <Zn>.<T>
+ /// theEmitter->emitIns_R_R(INS_sve_frecpe, EA_SCALABLE, REG_V0, REG_V2, INS_OPTS_SCALABLE_H);
+ ///
+ public static unsafe Vector<float> ReciprocalEstimate(Vector<float> value) => ReciprocalEstimate(value);
+
+ ///
+ /// svfloat64_t svrecpe[_f64](svfloat64_t op)
+ /// FRECPE Zresult.D, Zop.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HF_2A FRECPE <Zd>.<T>, <Zn>.<T>
+ /// theEmitter->emitIns_R_R(INS_sve_frecpe, EA_SCALABLE, REG_V0, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ReciprocalEstimate(Vector value) => ReciprocalEstimate(value); + + + /// ReciprocalExponent : Reciprocal exponent + + /// + /// svfloat32_t svrecpx[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// FRECPX Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; FRECPX Zresult.S, Pg/M, Zop.S + /// svfloat32_t svrecpx[_f32]_x(svbool_t pg, svfloat32_t op) + /// FRECPX Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; FRECPX Zresult.S, Pg/M, Zop.S + /// svfloat32_t svrecpx[_f32]_z(svbool_t pg, svfloat32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FRECPX Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_HR_3A FRECPX ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frecpx, EA_SCALABLE, REG_V5, REG_P5, REG_V5, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReciprocalExponent(Vector value) => ReciprocalExponent(value); + + /// + /// svfloat64_t svrecpx[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// FRECPX Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; FRECPX Zresult.D, Pg/M, Zop.D + /// svfloat64_t svrecpx[_f64]_x(svbool_t pg, svfloat64_t op) + /// FRECPX Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; FRECPX Zresult.D, Pg/M, Zop.D + /// svfloat64_t svrecpx[_f64]_z(svbool_t pg, svfloat64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FRECPX Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_HR_3A FRECPX ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frecpx, EA_SCALABLE, REG_V5, REG_P5, REG_V5, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReciprocalExponent(Vector value) => ReciprocalExponent(value); + + + /// ReciprocalSqrtEstimate : Reciprocal square root estimate + + /// + /// svfloat32_t svrsqrte[_f32](svfloat32_t op) + /// FRSQRTE Zresult.S, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_HF_2A FRSQRTE ., . + /// theEmitter->emitIns_R_R(INS_sve_frsqrte, EA_SCALABLE, REG_V5, REG_V3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_frsqrte, EA_SCALABLE, REG_V9, REG_V5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ReciprocalSqrtEstimate(Vector value) => ReciprocalSqrtEstimate(value); + + /// + /// svfloat64_t svrsqrte[_f64](svfloat64_t op) + /// FRSQRTE Zresult.D, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_HF_2A FRSQRTE ., . + /// theEmitter->emitIns_R_R(INS_sve_frsqrte, EA_SCALABLE, REG_V5, REG_V3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_frsqrte, EA_SCALABLE, REG_V9, REG_V5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ReciprocalSqrtEstimate(Vector value) => ReciprocalSqrtEstimate(value); + + + /// ReciprocalSqrtStep : Reciprocal square root step + + /// + /// svfloat32_t svrsqrts[_f32](svfloat32_t op1, svfloat32_t op2) + /// FRSQRTS Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_HK_3A FRSQRTS ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_frsqrts, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReciprocalSqrtStep(Vector left, Vector right) => ReciprocalSqrtStep(left, right); + + /// + /// svfloat64_t svrsqrts[_f64](svfloat64_t op1, svfloat64_t op2) + /// FRSQRTS Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_HK_3A FRSQRTS ., ., . 
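+ ///
+ /// Usage sketch (illustrative): FRSQRTS computes (3 - op1*op2)/2, so one
+ /// Newton-Raphson refinement of the rsqrt estimate looks like this (Multiply
+ /// as declared elsewhere in this API):
+ ///   Vector<double> e = Sve.ReciprocalSqrtEstimate(x);
+ ///   e = Sve.Multiply(e, Sve.ReciprocalSqrtStep(Sve.Multiply(x, e), e));
+ ///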
+ /// theEmitter->emitIns_R_R_R(INS_sve_frsqrts, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReciprocalSqrtStep(Vector left, Vector right) => ReciprocalSqrtStep(left, right); + + + /// ReciprocalStep : Reciprocal step + + /// + /// svfloat32_t svrecps[_f32](svfloat32_t op1, svfloat32_t op2) + /// FRECPS Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_HK_3A FRECPS ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_frecps, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReciprocalStep(Vector left, Vector right) => ReciprocalStep(left, right); + + /// + /// svfloat64_t svrecps[_f64](svfloat64_t op1, svfloat64_t op2) + /// FRECPS Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_HK_3A FRECPS ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_frecps, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReciprocalStep(Vector left, Vector right) => ReciprocalStep(left, right); + + + /// ReverseBits : Reverse bits + + /// + /// svint8_t svrbit[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) + /// RBIT Ztied.B, Pg/M, Zop.B + /// MOVPRFX Zresult, Zinactive; RBIT Zresult.B, Pg/M, Zop.B + /// svint8_t svrbit[_s8]_x(svbool_t pg, svint8_t op) + /// RBIT Ztied.B, Pg/M, Ztied.B + /// MOVPRFX Zresult, Zop; RBIT Zresult.B, Pg/M, Zop.B + /// svint8_t svrbit[_s8]_z(svbool_t pg, svint8_t op) + /// MOVPRFX Zresult.B, Pg/Z, Zop.B; RBIT Zresult.B, Pg/M, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_CU_3A RBIT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + /// + /// svint16_t svrbit[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// RBIT Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; RBIT Zresult.H, Pg/M, Zop.H + /// svint16_t svrbit[_s16]_x(svbool_t pg, svint16_t op) + /// RBIT Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; RBIT Zresult.H, Pg/M, Zop.H + /// svint16_t svrbit[_s16]_z(svbool_t pg, svint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; RBIT Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CU_3A RBIT ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + /// + /// svint32_t svrbit[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// RBIT Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; RBIT Zresult.S, Pg/M, Zop.S + /// svint32_t svrbit[_s32]_x(svbool_t pg, svint32_t op) + /// RBIT Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; RBIT Zresult.S, Pg/M, Zop.S + /// svint32_t svrbit[_s32]_z(svbool_t pg, svint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; RBIT Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CU_3A RBIT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + /// + /// svint64_t svrbit[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// RBIT Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; RBIT Zresult.D, Pg/M, Zop.D + /// svint64_t svrbit[_s64]_x(svbool_t pg, svint64_t op) + /// RBIT Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; RBIT Zresult.D, Pg/M, Zop.D + /// svint64_t svrbit[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; RBIT Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CU_3A RBIT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + /// + /// svuint8_t svrbit[_u8]_m(svuint8_t inactive, svbool_t pg, svuint8_t op) + /// RBIT Ztied.B, Pg/M, Zop.B + /// MOVPRFX Zresult, Zinactive; RBIT Zresult.B, Pg/M, Zop.B + /// svuint8_t svrbit[_u8]_x(svbool_t pg, svuint8_t op) + /// RBIT Ztied.B, Pg/M, Ztied.B + /// MOVPRFX Zresult, Zop; RBIT Zresult.B, Pg/M, Zop.B + /// svuint8_t svrbit[_u8]_z(svbool_t pg, svuint8_t op) + /// MOVPRFX Zresult.B, Pg/Z, Zop.B; RBIT Zresult.B, Pg/M, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_CU_3A RBIT ., /M, . 
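+ ///
+ /// Usage sketch (illustrative): bit-reverse every lane, here at byte width:
+ ///   Vector<byte> r = Sve.ReverseBits(v);   // 0b0000_0011 -> 0b1100_0000 per lane
+ ///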
+ /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + /// + /// svuint16_t svrbit[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) + /// RBIT Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; RBIT Zresult.H, Pg/M, Zop.H + /// svuint16_t svrbit[_u16]_x(svbool_t pg, svuint16_t op) + /// RBIT Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; RBIT Zresult.H, Pg/M, Zop.H + /// svuint16_t svrbit[_u16]_z(svbool_t pg, svuint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; RBIT Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CU_3A RBIT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + /// + /// svuint32_t svrbit[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// RBIT Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; RBIT Zresult.S, Pg/M, Zop.S + /// svuint32_t svrbit[_u32]_x(svbool_t pg, svuint32_t op) + /// RBIT Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; RBIT Zresult.S, Pg/M, Zop.S + /// svuint32_t svrbit[_u32]_z(svbool_t pg, svuint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; RBIT Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CU_3A RBIT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + /// + /// svuint64_t svrbit[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// RBIT Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; RBIT Zresult.D, Pg/M, Zop.D + /// svuint64_t svrbit[_u64]_x(svbool_t pg, svuint64_t op) + /// RBIT Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; RBIT Zresult.D, Pg/M, Zop.D + /// svuint64_t svrbit[_u64]_z(svbool_t pg, svuint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; RBIT Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CU_3A RBIT ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_rbit, EA_SCALABLE, REG_V28, REG_P1, REG_V19, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseBits(Vector value) => ReverseBits(value); + + + /// ReverseElement : Reverse all elements + + /// + /// svint8_t svrev[_s8](svint8_t op) + /// REV Zresult.B, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_CJ_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P1, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P4, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P3, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P0, REG_P6, INS_OPTS_SCALABLE_D); + /// IF_SVE_CG_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V7, REG_V1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svint16_t svrev[_s16](svint16_t op) + /// REV Zresult.H, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CJ_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P1, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P4, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P3, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P0, REG_P6, INS_OPTS_SCALABLE_D); + /// IF_SVE_CG_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V7, REG_V1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svint32_t svrev[_s32](svint32_t op) + /// REV Zresult.S, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CJ_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P1, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P4, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P3, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P0, REG_P6, INS_OPTS_SCALABLE_D); + /// IF_SVE_CG_2A REV ., . 
+ /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V7, REG_V1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svint64_t svrev[_s64](svint64_t op) + /// REV Zresult.D, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CJ_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P1, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P4, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P3, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P0, REG_P6, INS_OPTS_SCALABLE_D); + /// IF_SVE_CG_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V7, REG_V1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svuint8_t svrev[_u8](svuint8_t op) + /// REV Zresult.B, Zop.B + /// svbool_t svrev_b8(svbool_t op) + /// REV Presult.B, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_CJ_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P1, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P4, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P3, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P0, REG_P6, INS_OPTS_SCALABLE_D); + /// IF_SVE_CG_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V7, REG_V1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svuint16_t svrev[_u16](svuint16_t op) + /// REV Zresult.H, Zop.H + /// svbool_t svrev_b16(svbool_t op) + /// REV Presult.H, Pop.H + /// + /// codegenarm64test: + /// IF_SVE_CJ_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P1, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P4, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P3, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P0, REG_P6, INS_OPTS_SCALABLE_D); + /// IF_SVE_CG_2A REV ., . 
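+ ///
+ /// Usage sketch (illustrative): flip the whole vector end-for-end, so lanes
+ /// [0, 1, ..., n-1] come back as [n-1, ..., 1, 0]:
+ ///   Vector<ushort> r = Sve.ReverseElement(v);
+ ///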
+ /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V7, REG_V1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svuint32_t svrev[_u32](svuint32_t op) + /// REV Zresult.S, Zop.S + /// svbool_t svrev_b32(svbool_t op) + /// REV Presult.S, Pop.S + /// + /// codegenarm64test: + /// IF_SVE_CJ_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P1, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P4, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P3, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P0, REG_P6, INS_OPTS_SCALABLE_D); + /// IF_SVE_CG_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V7, REG_V1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svuint64_t svrev[_u64](svuint64_t op) + /// REV Zresult.D, Zop.D + /// svbool_t svrev_b64(svbool_t op) + /// REV Presult.D, Pop.D + /// + /// codegenarm64test: + /// IF_SVE_CJ_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P1, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P4, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P3, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P0, REG_P6, INS_OPTS_SCALABLE_D); + /// IF_SVE_CG_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V7, REG_V1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svfloat32_t svrev[_f32](svfloat32_t op) + /// REV Zresult.S, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CJ_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P1, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P4, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P3, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P0, REG_P6, INS_OPTS_SCALABLE_D); + /// IF_SVE_CG_2A REV ., . 
+ /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V7, REG_V1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + /// + /// svfloat64_t svrev[_f64](svfloat64_t op) + /// REV Zresult.D, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CJ_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P1, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P4, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P3, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P0, REG_P6, INS_OPTS_SCALABLE_D); + /// IF_SVE_CG_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V7, REG_V1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + + /// ReverseElement16 : Reverse halfwords within elements + + /// + /// svint32_t svrevh[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// REVH Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; REVH Zresult.S, Pg/M, Zop.S + /// svint32_t svrevh[_s32]_x(svbool_t pg, svint32_t op) + /// REVH Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; REVH Zresult.S, Pg/M, Zop.S + /// svint32_t svrevh[_s32]_z(svbool_t pg, svint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; REVH Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CU_3A REVH ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_revh, EA_SCALABLE, REG_V26, REG_P3, REG_V17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_revh, EA_SCALABLE, REG_V26, REG_P3, REG_V17, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseElement16(Vector value) => ReverseElement16(value); + + /// + /// svint64_t svrevh[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// REVH Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; REVH Zresult.D, Pg/M, Zop.D + /// svint64_t svrevh[_s64]_x(svbool_t pg, svint64_t op) + /// REVH Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; REVH Zresult.D, Pg/M, Zop.D + /// svint64_t svrevh[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; REVH Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CU_3A REVH ., /M, . 
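+ ///
+ /// Usage sketch (illustrative): reverse the order of the four 16-bit chunks
+ /// inside each 64-bit lane (the element width follows the vector type):
+ ///   Vector<long> r = Sve.ReverseElement16(v);
+ ///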
+ /// theEmitter->emitIns_R_R_R(INS_sve_revh, EA_SCALABLE, REG_V26, REG_P3, REG_V17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_revh, EA_SCALABLE, REG_V26, REG_P3, REG_V17, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseElement16(Vector value) => ReverseElement16(value); + + /// + /// svuint32_t svrevh[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// REVH Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; REVH Zresult.S, Pg/M, Zop.S + /// svuint32_t svrevh[_u32]_x(svbool_t pg, svuint32_t op) + /// REVH Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; REVH Zresult.S, Pg/M, Zop.S + /// svuint32_t svrevh[_u32]_z(svbool_t pg, svuint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; REVH Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CU_3A REVH ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_revh, EA_SCALABLE, REG_V26, REG_P3, REG_V17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_revh, EA_SCALABLE, REG_V26, REG_P3, REG_V17, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseElement16(Vector value) => ReverseElement16(value); + + /// + /// svuint64_t svrevh[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// REVH Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; REVH Zresult.D, Pg/M, Zop.D + /// svuint64_t svrevh[_u64]_x(svbool_t pg, svuint64_t op) + /// REVH Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; REVH Zresult.D, Pg/M, Zop.D + /// svuint64_t svrevh[_u64]_z(svbool_t pg, svuint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; REVH Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CU_3A REVH ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_revh, EA_SCALABLE, REG_V26, REG_P3, REG_V17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_revh, EA_SCALABLE, REG_V26, REG_P3, REG_V17, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseElement16(Vector value) => ReverseElement16(value); + + + /// ReverseElement32 : Reverse words within elements + + /// + /// svint64_t svrevw[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// REVW Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; REVW Zresult.D, Pg/M, Zop.D + /// svint64_t svrevw[_s64]_x(svbool_t pg, svint64_t op) + /// REVW Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; REVW Zresult.D, Pg/M, Zop.D + /// svint64_t svrevw[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; REVW Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CU_3A REVW .D, /M, .D + /// theEmitter->emitIns_R_R_R(INS_sve_revw, EA_SCALABLE, REG_V25, REG_P4, REG_V16, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseElement32(Vector value) => ReverseElement32(value); + + /// + /// svuint64_t svrevw[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// REVW Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; REVW Zresult.D, Pg/M, Zop.D + /// svuint64_t svrevw[_u64]_x(svbool_t pg, svuint64_t op) + /// REVW Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; REVW Zresult.D, Pg/M, Zop.D + /// svuint64_t svrevw[_u64]_z(svbool_t pg, svuint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; REVW Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CU_3A REVW .D, /M, .D + /// theEmitter->emitIns_R_R_R(INS_sve_revw, EA_SCALABLE, REG_V25, REG_P4, REG_V16, INS_OPTS_SCALABLE_D); + /// + 
/// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseElement32(Vector value) => ReverseElement32(value); + + + /// ReverseElement8 : Reverse bytes within elements + + /// + /// svint16_t svrevb[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// REVB Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; REVB Zresult.H, Pg/M, Zop.H + /// svint16_t svrevb[_s16]_x(svbool_t pg, svint16_t op) + /// REVB Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; REVB Zresult.H, Pg/M, Zop.H + /// svint16_t svrevb[_s16]_z(svbool_t pg, svint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; REVB Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CU_3A REVB ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseElement8(Vector value) => ReverseElement8(value); + + /// + /// svint32_t svrevb[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// REVB Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; REVB Zresult.S, Pg/M, Zop.S + /// svint32_t svrevb[_s32]_x(svbool_t pg, svint32_t op) + /// REVB Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; REVB Zresult.S, Pg/M, Zop.S + /// svint32_t svrevb[_s32]_z(svbool_t pg, svint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; REVB Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CU_3A REVB ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseElement8(Vector value) => ReverseElement8(value); + + /// + /// svint64_t svrevb[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// REVB Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; REVB Zresult.D, Pg/M, Zop.D + /// svint64_t svrevb[_s64]_x(svbool_t pg, svint64_t op) + /// REVB Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; REVB Zresult.D, Pg/M, Zop.D + /// svint64_t svrevb[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; REVB Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CU_3A REVB ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseElement8(Vector value) => ReverseElement8(value); + + /// + /// svuint16_t svrevb[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) + /// REVB Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; REVB Zresult.H, Pg/M, Zop.H + /// svuint16_t svrevb[_u16]_x(svbool_t pg, svuint16_t op) + /// REVB Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; REVB Zresult.H, Pg/M, Zop.H + /// svuint16_t svrevb[_u16]_z(svbool_t pg, svuint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; REVB Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CU_3A REVB ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseElement8(Vector value) => ReverseElement8(value); + + /// + /// svuint32_t svrevb[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// REVB Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; REVB Zresult.S, Pg/M, Zop.S + /// svuint32_t svrevb[_u32]_x(svbool_t pg, svuint32_t op) + /// REVB Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; REVB Zresult.S, Pg/M, Zop.S + /// svuint32_t svrevb[_u32]_z(svbool_t pg, svuint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; REVB Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CU_3A REVB ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseElement8(Vector value) => ReverseElement8(value); + + /// + /// svuint64_t svrevb[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// REVB Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; REVB Zresult.D, Pg/M, Zop.D + /// svuint64_t svrevb[_u64]_x(svbool_t pg, svuint64_t op) + /// REVB Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; REVB Zresult.D, Pg/M, Zop.D + /// svuint64_t svrevb[_u64]_z(svbool_t pg, svuint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; REVB Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_CU_3A REVB ., /M, . 
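+ ///
+ /// Usage sketch (illustrative): byte-swap each lane, e.g. to convert
+ /// big-endian 64-bit values to the native little-endian order:
+ ///   Vector<ulong> native = Sve.ReverseElement8(bigEndian);
+ ///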
+ /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_revb, EA_SCALABLE, REG_V27, REG_P2, REG_V18, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReverseElement8(Vector value) => ReverseElement8(value); + + + /// RoundAwayFromZero : Round to nearest, ties away from zero + + /// + /// svfloat32_t svrinta[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// FRINTA Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; FRINTA Zresult.S, Pg/M, Zop.S + /// svfloat32_t svrinta[_f32]_x(svbool_t pg, svfloat32_t op) + /// FRINTA Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; FRINTA Zresult.S, Pg/M, Zop.S + /// svfloat32_t svrinta[_f32]_z(svbool_t pg, svfloat32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FRINTA Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTA ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frinta, EA_SCALABLE, REG_V26, REG_P7, REG_V2, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundAwayFromZero(Vector value) => RoundAwayFromZero(value); + + /// + /// svfloat64_t svrinta[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// FRINTA Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; FRINTA Zresult.D, Pg/M, Zop.D + /// svfloat64_t svrinta[_f64]_x(svbool_t pg, svfloat64_t op) + /// FRINTA Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; FRINTA Zresult.D, Pg/M, Zop.D + /// svfloat64_t svrinta[_f64]_z(svbool_t pg, svfloat64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FRINTA Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTA ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frinta, EA_SCALABLE, REG_V26, REG_P7, REG_V2, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundAwayFromZero(Vector value) => RoundAwayFromZero(value); + + + /// RoundToNearest : Round to nearest, ties to even + + /// + /// svfloat32_t svrintn[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// FRINTN Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; FRINTN Zresult.S, Pg/M, Zop.S + /// svfloat32_t svrintn[_f32]_x(svbool_t pg, svfloat32_t op) + /// FRINTN Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; FRINTN Zresult.S, Pg/M, Zop.S + /// svfloat32_t svrintn[_f32]_z(svbool_t pg, svfloat32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FRINTN Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTN ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frintn, EA_SCALABLE, REG_V29, REG_P4, REG_V10, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundToNearest(Vector value) => RoundToNearest(value); + + /// + /// svfloat64_t svrintn[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// FRINTN Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; FRINTN Zresult.D, Pg/M, Zop.D + /// svfloat64_t svrintn[_f64]_x(svbool_t pg, svfloat64_t op) + /// FRINTN Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; FRINTN Zresult.D, Pg/M, Zop.D + /// svfloat64_t svrintn[_f64]_z(svbool_t pg, svfloat64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FRINTN Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTN ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_frintn, EA_SCALABLE, REG_V29, REG_P4, REG_V10, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundToNearest(Vector value) => RoundToNearest(value); + + + /// RoundToNegativeInfinity : Round towards -∞ + + /// + /// svfloat32_t svrintm[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// FRINTM Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; FRINTM Zresult.S, Pg/M, Zop.S + /// svfloat32_t svrintm[_f32]_x(svbool_t pg, svfloat32_t op) + /// FRINTM Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; FRINTM Zresult.S, Pg/M, Zop.S + /// svfloat32_t svrintm[_f32]_z(svbool_t pg, svfloat32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FRINTM Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTM ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frintm, EA_SCALABLE, REG_V28, REG_P5, REG_V0, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundToNegativeInfinity(Vector value) => RoundToNegativeInfinity(value); + + /// + /// svfloat64_t svrintm[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// FRINTM Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; FRINTM Zresult.D, Pg/M, Zop.D + /// svfloat64_t svrintm[_f64]_x(svbool_t pg, svfloat64_t op) + /// FRINTM Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; FRINTM Zresult.D, Pg/M, Zop.D + /// svfloat64_t svrintm[_f64]_z(svbool_t pg, svfloat64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FRINTM Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTM ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frintm, EA_SCALABLE, REG_V28, REG_P5, REG_V0, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundToNegativeInfinity(Vector value) => RoundToNegativeInfinity(value); + + + /// RoundToPositiveInfinity : Round towards +∞ + + /// + /// svfloat32_t svrintp[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// FRINTP Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; FRINTP Zresult.S, Pg/M, Zop.S + /// svfloat32_t svrintp[_f32]_x(svbool_t pg, svfloat32_t op) + /// FRINTP Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; FRINTP Zresult.S, Pg/M, Zop.S + /// svfloat32_t svrintp[_f32]_z(svbool_t pg, svfloat32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FRINTP Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTP ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frintp, EA_SCALABLE, REG_V30, REG_P3, REG_V11, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundToPositiveInfinity(Vector value) => RoundToPositiveInfinity(value); + + /// + /// svfloat64_t svrintp[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// FRINTP Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; FRINTP Zresult.D, Pg/M, Zop.D + /// svfloat64_t svrintp[_f64]_x(svbool_t pg, svfloat64_t op) + /// FRINTP Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; FRINTP Zresult.D, Pg/M, Zop.D + /// svfloat64_t svrintp[_f64]_z(svbool_t pg, svfloat64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FRINTP Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTP ., /M, . 
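+ ///
+ /// Usage sketch (illustrative): for lanes [-1.5, 2.5] the rounding modes give
+ ///   RoundToPositiveInfinity -> [-1.0, 3.0]   (ceiling)
+ ///   RoundToNegativeInfinity -> [-2.0, 2.0]   (floor)
+ ///   RoundToZero             -> [-1.0, 2.0]   (truncate)
+ ///   RoundToNearest          -> [-2.0, 2.0]   (ties to even)
+ ///   RoundAwayFromZero       -> [-2.0, 3.0]
+ ///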
+ /// theEmitter->emitIns_R_R_R(INS_sve_frintp, EA_SCALABLE, REG_V30, REG_P3, REG_V11, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundToPositiveInfinity(Vector value) => RoundToPositiveInfinity(value); + + + /// RoundToZero : Round towards zero + + /// + /// svfloat32_t svrintz[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// FRINTZ Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; FRINTZ Zresult.S, Pg/M, Zop.S + /// svfloat32_t svrintz[_f32]_x(svbool_t pg, svfloat32_t op) + /// FRINTZ Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; FRINTZ Zresult.S, Pg/M, Zop.S + /// svfloat32_t svrintz[_f32]_z(svbool_t pg, svfloat32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FRINTZ Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTZ ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frintz, EA_SCALABLE, REG_V0, REG_P0, REG_V13, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundToZero(Vector value) => RoundToZero(value); + + /// + /// svfloat64_t svrintz[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// FRINTZ Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; FRINTZ Zresult.D, Pg/M, Zop.D + /// svfloat64_t svrintz[_f64]_x(svbool_t pg, svfloat64_t op) + /// FRINTZ Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; FRINTZ Zresult.D, Pg/M, Zop.D + /// svfloat64_t svrintz[_f64]_z(svbool_t pg, svfloat64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FRINTZ Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTZ ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frintz, EA_SCALABLE, REG_V0, REG_P0, REG_V13, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundToZero(Vector value) => RoundToZero(value); + + + + + /// SaturatingDecrementBy16BitElementCount : Saturating decrement by number of halfword elements + + /// + /// int32_t svqdech_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQDECH Xtied, Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqdech - not implemented in coreclr + /// + public static unsafe int SaturatingDecrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy16BitElementCount(value, scale, pattern); + + /// + /// int64_t svqdech_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQDECH Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqdech - not implemented in coreclr + /// + public static unsafe long SaturatingDecrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy16BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqdech_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQDECH Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqdech - not implemented in coreclr + /// + public static unsafe uint SaturatingDecrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy16BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqdech_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQDECH Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// 
sve_uqdech - not implemented in coreclr + /// + public static unsafe ulong SaturatingDecrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy16BitElementCount(value, scale, pattern); + + /// + /// svint16_t svqdech_pat[_s16](svint16_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQDECH Ztied.H, pattern, MUL #imm_factor + /// MOVPRFX Zresult, Zop; SQDECH Zresult.H, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqdech - not implemented in coreclr + /// + public static unsafe Vector SaturatingDecrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy16BitElementCount(value, scale, pattern); + + /// + /// svuint16_t svqdech_pat[_u16](svuint16_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQDECH Ztied.H, pattern, MUL #imm_factor + /// MOVPRFX Zresult, Zop; UQDECH Zresult.H, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqdech - not implemented in coreclr + /// + public static unsafe Vector SaturatingDecrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy16BitElementCount(value, scale, pattern); + + + /// SaturatingDecrementBy32BitElementCount : Saturating decrement by number of word elements + + /// + /// int32_t svqdecw_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQDECW Xtied, Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqdecw - not implemented in coreclr + /// + public static unsafe int SaturatingDecrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy32BitElementCount(value, scale, pattern); + + /// + /// int64_t svqdecw_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQDECW Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqdecw - not implemented in coreclr + /// + public static unsafe long SaturatingDecrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy32BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqdecw_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQDECW Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqdecw - not implemented in coreclr + /// + public static unsafe uint SaturatingDecrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy32BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqdecw_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQDECW Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqdecw - not implemented in coreclr + /// + public static unsafe ulong SaturatingDecrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy32BitElementCount(value, scale, pattern); + + /// + /// svint32_t svqdecw_pat[_s32](svint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQDECW Ztied.S, pattern, MUL #imm_factor + /// MOVPRFX Zresult, Zop; SQDECW Zresult.S, 
pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqdecw - not implemented in coreclr + /// + public static unsafe Vector SaturatingDecrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy32BitElementCount(value, scale, pattern); + + /// + /// svuint32_t svqdecw_pat[_u32](svuint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQDECW Ztied.S, pattern, MUL #imm_factor + /// MOVPRFX Zresult, Zop; UQDECW Zresult.S, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqdecw - not implemented in coreclr + /// + public static unsafe Vector SaturatingDecrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy32BitElementCount(value, scale, pattern); + + + /// SaturatingDecrementBy64BitElementCount : Saturating decrement by number of doubleword elements + + /// + /// int32_t svqdecd_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQDECD Xtied, Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqdecd - not implemented in coreclr + /// + public static unsafe int SaturatingDecrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy64BitElementCount(value, scale, pattern); + + /// + /// int64_t svqdecd_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQDECD Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqdecd - not implemented in coreclr + /// + public static unsafe long SaturatingDecrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy64BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqdecd_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQDECD Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqdecd - not implemented in coreclr + /// + public static unsafe uint SaturatingDecrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy64BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqdecd_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQDECD Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqdecd - not implemented in coreclr + /// + public static unsafe ulong SaturatingDecrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy64BitElementCount(value, scale, pattern); + + /// + /// svint64_t svqdecd_pat[_s64](svint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQDECD Ztied.D, pattern, MUL #imm_factor + /// MOVPRFX Zresult, Zop; SQDECD Zresult.D, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqdecd - not implemented in coreclr + /// + public static unsafe Vector SaturatingDecrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy64BitElementCount(value, scale, pattern); + + /// + /// svuint64_t svqdecd_pat[_u64](svuint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQDECD Ztied.D, 
pattern, MUL #imm_factor + /// MOVPRFX Zresult, Zop; UQDECD Zresult.D, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqdecd - not implemented in coreclr + /// + public static unsafe Vector SaturatingDecrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy64BitElementCount(value, scale, pattern); + + + /// SaturatingDecrementBy8BitElementCount : Saturating decrement by number of byte elements + + /// + /// int32_t svqdecb_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQDECB Xtied, Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqdecb - not implemented in coreclr + /// + public static unsafe int SaturatingDecrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy8BitElementCount(value, scale, pattern); + + /// + /// int64_t svqdecb_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQDECB Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqdecb - not implemented in coreclr + /// + public static unsafe long SaturatingDecrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy8BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqdecb_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQDECB Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqdecb - not implemented in coreclr + /// + public static unsafe uint SaturatingDecrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy8BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqdecb_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQDECB Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqdecb - not implemented in coreclr + /// + public static unsafe ulong SaturatingDecrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingDecrementBy8BitElementCount(value, scale, pattern); + + + /// SaturatingDecrementByActiveElementCount : Saturating decrement by active element count + + /// + /// svint16_t svqdecp[_s16](svint16_t op, svbool_t pg) + /// SQDECP Ztied.H, Pg + /// MOVPRFX Zresult, Zop; SQDECP Zresult.H, Pg + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQDECP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQDECP ., . 
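+ ///
+ /// Usage sketch (illustrative): the pattern-based decrement forms above clamp
+ /// rather than wrap, which keeps a scalar loop counter safe near its minimum:
+ ///   int remaining = Sve.SaturatingDecrementBy16BitElementCount(count, 1);
+ ///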
+ /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V2, REG_P2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// svint32_t svqdecp[_s32](svint32_t op, svbool_t pg) + /// SQDECP Ztied.S, Pg + /// MOVPRFX Zresult, Zop; SQDECP Zresult.S, Pg + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQDECP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V2, REG_P2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// svint64_t svqdecp[_s64](svint64_t op, svbool_t pg) + /// SQDECP Ztied.D, Pg + /// MOVPRFX Zresult, Zop; SQDECP Zresult.D, Pg + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQDECP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQDECP ., . 
+ /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V2, REG_P2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int32_t svqdecp[_n_s32]_b8(int32_t op, svbool_t pg) + /// SQDECP Xtied, Pg.B, Wtied + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQDECP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V2, REG_P2, INS_OPTS_SCALABLE_D); + /// + public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int64_t svqdecp[_n_s64]_b8(int64_t op, svbool_t pg) + /// SQDECP Xtied, Pg.B + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQDECP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V2, REG_P2, INS_OPTS_SCALABLE_D); + /// + public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint32_t svqdecp[_n_u32]_b8(uint32_t op, svbool_t pg) + /// UQDECP Wtied, Pg.B + /// + /// codegenarm64test: + /// IF_SVE_DO_2A UQDECP , . 
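+ /// Usage sketch (illustrative only, not generator output; 'remaining' and 'pred' are
+ /// hypothetical locals, with 'pred' a Vector<byte> predicate as in the _b8 overload,
+ /// assuming these methods surface on the Sve class):
+ ///   uint remaining = total;
+ ///   // Subtract the number of active byte lanes in 'pred'; unsigned saturation
+ ///   // (UQDECP) clamps the result at 0 instead of wrapping around.
+ ///   remaining = Sve.SaturatingDecrementByActiveElementCount(remaining, pred);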
+ /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A UQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V6, REG_P6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V7, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V8, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint64_t svqdecp[_n_u64]_b8(uint64_t op, svbool_t pg) + /// UQDECP Xtied, Pg.B + /// + /// codegenarm64test: + /// IF_SVE_DO_2A UQDECP , . + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A UQDECP ., . 
+ /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V6, REG_P6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V7, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V8, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int32_t svqdecp[_n_s32]_b16(int32_t op, svbool_t pg) + /// SQDECP Xtied, Pg.H, Wtied + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQDECP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V2, REG_P2, INS_OPTS_SCALABLE_D); + /// + public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int64_t svqdecp[_n_s64]_b16(int64_t op, svbool_t pg) + /// SQDECP Xtied, Pg.H + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQDECP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V2, REG_P2, INS_OPTS_SCALABLE_D); + /// + public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint32_t svqdecp[_n_u32]_b16(uint32_t op, svbool_t pg) + /// UQDECP Wtied, Pg.H + /// + /// codegenarm64test: + /// IF_SVE_DO_2A UQDECP , . 
+ /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A UQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V6, REG_P6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V7, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V8, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint64_t svqdecp[_n_u64]_b16(uint64_t op, svbool_t pg) + /// UQDECP Xtied, Pg.H + /// + /// codegenarm64test: + /// IF_SVE_DO_2A UQDECP , . + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A UQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V6, REG_P6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V7, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V8, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// svuint16_t svqdecp[_u16](svuint16_t op, svbool_t pg) + /// UQDECP Ztied.H, Pg + /// MOVPRFX Zresult, Zop; UQDECP Zresult.H, Pg + /// + /// codegenarm64test: + /// IF_SVE_DO_2A UQDECP , . 
+ /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A UQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V6, REG_P6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V7, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V8, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int32_t svqdecp[_n_s32]_b32(int32_t op, svbool_t pg) + /// SQDECP Xtied, Pg.S, Wtied + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQDECP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQDECP ., . 
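+ /// Usage sketch (illustrative only; 'count' and 'pred32' are hypothetical locals,
+ /// with 'pred32' a Vector<uint> predicate as in this _b32 overload):
+ ///   int count = start;
+ ///   // Decrement by the number of active 32-bit lanes; signed saturation (SQDECP)
+ ///   // clamps at int.MinValue instead of wrapping:
+ ///   count = Sve.SaturatingDecrementByActiveElementCount(count, pred32);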
+ /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V2, REG_P2, INS_OPTS_SCALABLE_D); + /// + public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int64_t svqdecp[_n_s64]_b32(int64_t op, svbool_t pg) + /// SQDECP Xtied, Pg.S + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQDECP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V2, REG_P2, INS_OPTS_SCALABLE_D); + /// + public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint32_t svqdecp[_n_u32]_b32(uint32_t op, svbool_t pg) + /// UQDECP Wtied, Pg.S + /// + /// codegenarm64test: + /// IF_SVE_DO_2A UQDECP , . + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A UQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V6, REG_P6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V7, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V8, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint64_t svqdecp[_n_u64]_b32(uint64_t op, svbool_t pg) + /// UQDECP Xtied, Pg.S + /// + /// codegenarm64test: + /// IF_SVE_DO_2A UQDECP , . 
+ /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A UQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V6, REG_P6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V7, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V8, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// svuint32_t svqdecp[_u32](svuint32_t op, svbool_t pg) + /// UQDECP Ztied.S, Pg + /// MOVPRFX Zresult, Zop; UQDECP Zresult.S, Pg + /// + /// codegenarm64test: + /// IF_SVE_DO_2A UQDECP , . + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A UQDECP ., . 
+ /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V6, REG_P6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V7, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V8, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int32_t svqdecp[_n_s32]_b64(int32_t op, svbool_t pg) + /// SQDECP Xtied, Pg.D, Wtied + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQDECP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V2, REG_P2, INS_OPTS_SCALABLE_D); + /// + public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// int64_t svqdecp[_n_s64]_b64(int64_t op, svbool_t pg) + /// SQDECP Xtied, Pg.D + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQDECP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqdecp, EA_SCALABLE, REG_V2, REG_P2, INS_OPTS_SCALABLE_D); + /// + public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint32_t svqdecp[_n_u32]_b64(uint32_t op, svbool_t pg) + /// UQDECP Wtied, Pg.D + /// + /// codegenarm64test: + /// IF_SVE_DO_2A UQDECP , . 
+ /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A UQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V6, REG_P6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V7, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V8, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// uint64_t svqdecp[_n_u64]_b64(uint64_t op, svbool_t pg) + /// UQDECP Xtied, Pg.D + /// + /// codegenarm64test: + /// IF_SVE_DO_2A UQDECP , . + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A UQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V6, REG_P6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V7, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V8, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + /// + /// svuint64_t svqdecp[_u64](svuint64_t op, svbool_t pg) + /// UQDECP Ztied.D, Pg + /// MOVPRFX Zresult, Zop; UQDECP Zresult.D, Pg + /// + /// codegenarm64test: + /// IF_SVE_DO_2A UQDECP , . 
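+ /// Usage sketch (illustrative only; 'counters' and 'pred64' are hypothetical locals):
+ ///   // Per-lane form (UQDECP): every 64-bit lane of 'counters' drops by the number
+ ///   // of active lanes in 'pred64', saturating at 0:
+ ///   counters = Sve.SaturatingDecrementByActiveElementCount(counters, pred64);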
+ /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A UQDECP ., . + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V6, REG_P6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V7, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqdecp, EA_SCALABLE, REG_V8, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from) => SaturatingDecrementByActiveElementCount(value, from); + + + /// SaturatingIncrementBy16BitElementCount : Saturating increment by number of halfword elements + + /// + /// int32_t svqinch_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQINCH Xtied, Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqinch - not implemented in coreclr + /// + public static unsafe int SaturatingIncrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy16BitElementCount(value, scale, pattern); + + /// + /// int64_t svqinch_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQINCH Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqinch - not implemented in coreclr + /// + public static unsafe long SaturatingIncrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy16BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqinch_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQINCH Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqinch - not implemented in coreclr + /// + public static unsafe uint SaturatingIncrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy16BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqinch_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQINCH Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqinch - not implemented in coreclr + /// + public static unsafe ulong SaturatingIncrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy16BitElementCount(value, scale, pattern); + + /// + /// svint16_t svqinch_pat[_s16](svint16_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQINCH Ztied.H, pattern, MUL #imm_factor + /// MOVPRFX Zresult, Zop; SQINCH Zresult.H, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqinch - not implemented in coreclr + /// + public static unsafe Vector 
SaturatingIncrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy16BitElementCount(value, scale, pattern); + + /// + /// svuint16_t svqinch_pat[_u16](svuint16_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQINCH Ztied.H, pattern, MUL #imm_factor + /// MOVPRFX Zresult, Zop; UQINCH Zresult.H, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqinch - not implemented in coreclr + /// + public static unsafe Vector SaturatingIncrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy16BitElementCount(value, scale, pattern); + + + /// SaturatingIncrementBy32BitElementCount : Saturating increment by number of word elements + + /// + /// int32_t svqincw_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQINCW Xtied, Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqincw - not implemented in coreclr + /// + public static unsafe int SaturatingIncrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy32BitElementCount(value, scale, pattern); + + /// + /// int64_t svqincw_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQINCW Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqincw - not implemented in coreclr + /// + public static unsafe long SaturatingIncrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy32BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqincw_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQINCW Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqincw - not implemented in coreclr + /// + public static unsafe uint SaturatingIncrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy32BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqincw_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQINCW Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqincw - not implemented in coreclr + /// + public static unsafe ulong SaturatingIncrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy32BitElementCount(value, scale, pattern); + + /// + /// svint32_t svqincw_pat[_s32](svint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQINCW Ztied.S, pattern, MUL #imm_factor + /// MOVPRFX Zresult, Zop; SQINCW Zresult.S, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqincw - not implemented in coreclr + /// + public static unsafe Vector SaturatingIncrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy32BitElementCount(value, scale, pattern); + + /// + /// svuint32_t svqincw_pat[_u32](svuint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQINCW Ztied.S, pattern, MUL #imm_factor + /// MOVPRFX Zresult, Zop; UQINCW Zresult.S, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqincw - not 
implemented in coreclr + /// + public static unsafe Vector SaturatingIncrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy32BitElementCount(value, scale, pattern); + + + /// SaturatingIncrementBy64BitElementCount : Saturating increment by number of doubleword elements + + /// + /// int32_t svqincd_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQINCD Xtied, Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqincd - not implemented in coreclr + /// + public static unsafe int SaturatingIncrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy64BitElementCount(value, scale, pattern); + + /// + /// int64_t svqincd_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQINCD Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqincd - not implemented in coreclr + /// + public static unsafe long SaturatingIncrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy64BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqincd_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQINCD Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqincd - not implemented in coreclr + /// + public static unsafe uint SaturatingIncrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy64BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqincd_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQINCD Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqincd - not implemented in coreclr + /// + public static unsafe ulong SaturatingIncrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy64BitElementCount(value, scale, pattern); + + /// + /// svint64_t svqincd_pat[_s64](svint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQINCD Ztied.D, pattern, MUL #imm_factor + /// MOVPRFX Zresult, Zop; SQINCD Zresult.D, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqincd - not implemented in coreclr + /// + public static unsafe Vector SaturatingIncrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy64BitElementCount(value, scale, pattern); + + /// + /// svuint64_t svqincd_pat[_u64](svuint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQINCD Ztied.D, pattern, MUL #imm_factor + /// MOVPRFX Zresult, Zop; UQINCD Zresult.D, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqincd - not implemented in coreclr + /// + public static unsafe Vector SaturatingIncrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy64BitElementCount(value, scale, pattern); + + + /// SaturatingIncrementBy8BitElementCount : Saturating increment by number of byte elements + + /// + /// int32_t svqincb_pat[_n_s32](int32_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQINCB 
Xtied, Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqincb - not implemented in coreclr + /// + public static unsafe int SaturatingIncrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy8BitElementCount(value, scale, pattern); + + /// + /// int64_t svqincb_pat[_n_s64](int64_t op, enum svpattern pattern, uint64_t imm_factor) + /// SQINCB Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_sqincb - not implemented in coreclr + /// + public static unsafe long SaturatingIncrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy8BitElementCount(value, scale, pattern); + + /// + /// uint32_t svqincb_pat[_n_u32](uint32_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQINCB Wtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqincb - not implemented in coreclr + /// + public static unsafe uint SaturatingIncrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy8BitElementCount(value, scale, pattern); + + /// + /// uint64_t svqincb_pat[_n_u64](uint64_t op, enum svpattern pattern, uint64_t imm_factor) + /// UQINCB Xtied, pattern, MUL #imm_factor + /// + /// codegenarm64test: + /// sve_uqincb - not implemented in coreclr + /// + public static unsafe ulong SaturatingIncrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => SaturatingIncrementBy8BitElementCount(value, scale, pattern); + + + /// SaturatingIncrementByActiveElementCount : Saturating increment by active element count + + /// + /// svint16_t svqincp[_s16](svint16_t op, svbool_t pg) + /// SQINCP Ztied.H, Pg + /// MOVPRFX Zresult, Zop; SQINCP Zresult.H, Pg + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQINCP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQINCP ., . 
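+ /// Usage sketch (illustrative only; assumes the scalar pattern/scale overloads above
+ /// surface on the Sve class):
+ ///   // Advance a loop index by two full vectors' worth of 32-bit lanes
+ ///   // (scale = 2 maps to MUL #2, pattern defaults to SveMaskPattern.All),
+ ///   // clamping at int.MaxValue instead of wrapping:
+ ///   for (int i = 0; i < length; i = Sve.SaturatingIncrementBy32BitElementCount(i, 2))
+ ///   {
+ ///       // process two vectors of ints starting at index i
+ ///   }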
+ /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V3, REG_P3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V4, REG_P4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V5, REG_P5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// svint32_t svqincp[_s32](svint32_t op, svbool_t pg) + /// SQINCP Ztied.S, Pg + /// MOVPRFX Zresult, Zop; SQINCP Zresult.S, Pg + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQINCP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQINCP ., . + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V3, REG_P3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V4, REG_P4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V5, REG_P5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// svint64_t svqincp[_s64](svint64_t op, svbool_t pg) + /// SQINCP Ztied.D, Pg + /// MOVPRFX Zresult, Zop; SQINCP Zresult.D, Pg + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQINCP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQINCP ., . 
+ /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V3, REG_P3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V4, REG_P4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V5, REG_P5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// int32_t svqincp[_n_s32]_b8(int32_t op, svbool_t pg) + /// SQINCP Xtied, Pg.B, Wtied + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQINCP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQINCP ., . + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V3, REG_P3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V4, REG_P4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V5, REG_P5, INS_OPTS_SCALABLE_D); + /// + public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// int64_t svqincp[_n_s64]_b8(int64_t op, svbool_t pg) + /// SQINCP Xtied, Pg.B + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQINCP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQINCP ., . + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V3, REG_P3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V4, REG_P4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V5, REG_P5, INS_OPTS_SCALABLE_D); + /// + public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// uint32_t svqincp[_n_u32]_b8(uint32_t op, svbool_t pg) + /// UQINCP Wtied, Pg.B + /// + /// codegenarm64test: + /// IF_SVE_DO_2A UQINCP , . 
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A UQINCP ., . + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_SCALABLE, REG_V9, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_SCALABLE, REG_V10, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_SCALABLE, REG_V11, REG_P3, INS_OPTS_SCALABLE_D); + /// + public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// uint64_t svqincp[_n_u64]_b8(uint64_t op, svbool_t pg) + /// UQINCP Xtied, Pg.B + /// + /// codegenarm64test: + /// IF_SVE_DO_2A UQINCP , . + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A UQINCP ., . 
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_SCALABLE, REG_V9, REG_P1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_SCALABLE, REG_V10, REG_P2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_SCALABLE, REG_V11, REG_P3, INS_OPTS_SCALABLE_D); + /// + public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// int32_t svqincp[_n_s32]_b16(int32_t op, svbool_t pg) + /// SQINCP Xtied, Pg.H, Wtied + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQINCP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQINCP ., . + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V3, REG_P3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V4, REG_P4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V5, REG_P5, INS_OPTS_SCALABLE_D); + /// + public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// int64_t svqincp[_n_s64]_b16(int64_t op, svbool_t pg) + /// SQINCP Xtied, Pg.H + /// + /// codegenarm64test: + /// IF_SVE_DO_2A SQINCP , ., + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D); + /// IF_SVE_DP_2A SQINCP ., . + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V3, REG_P3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V4, REG_P4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V5, REG_P5, INS_OPTS_SCALABLE_D); + /// + public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector from) => SaturatingIncrementByActiveElementCount(value, from); + + /// + /// uint32_t svqincp[_n_u32]_b16(uint32_t op, svbool_t pg) + /// UQINCP Wtied, Pg.H + /// + /// codegenarm64test: + /// IF_SVE_DO_2A UQINCP , . 
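+ /// Usage sketch (illustrative only; 'processed' and 'pred16' are hypothetical locals,
+ /// with 'pred16' a Vector<ushort> predicate as in the _b16 overload):
+ ///   long processed = 0;
+ ///   // Add the number of 16-bit lanes that were active this iteration;
+ ///   // signed saturation (SQINCP) clamps at long.MaxValue:
+ ///   processed = Sve.SaturatingIncrementByActiveElementCount(processed, pred16);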
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_DP_2A UQINCP <Zdn>.<T>, <Pg>.<T>
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_SCALABLE, REG_V9, REG_P1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_SCALABLE, REG_V10, REG_P2, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_SCALABLE, REG_V11, REG_P3, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector<ushort> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+ /// <summary>
+ /// uint64_t svqincp[_n_u64]_b16(uint64_t op, svbool_t pg)
+ /// UQINCP Xtied, Pg.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DO_2A UQINCP <Xdn>, <Pg>.<T>
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_DP_2A UQINCP <Zdn>.<T>, <Pg>.<T>
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_SCALABLE, REG_V9, REG_P1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_SCALABLE, REG_V10, REG_P2, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R(INS_sve_uqincp, EA_SCALABLE, REG_V11, REG_P3, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector<ushort> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+ /// <summary>
+ /// svuint16_t svqincp[_u16](svuint16_t op, svbool_t pg)
+ /// UQINCP Ztied.H, Pg
+ /// MOVPRFX Zresult, Zop; UQINCP Zresult.H, Pg
+ ///
+ /// codegenarm64test: same UQINCP tests as the overload above.
+ /// </summary>
+ public static unsafe Vector<ushort> SaturatingIncrementByActiveElementCount(Vector<ushort> value, Vector<ushort> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+ /// <summary>
+ /// int32_t svqincp[_n_s32]_b32(int32_t op, svbool_t pg)
+ /// SQINCP Xtied, Pg.S, Wtied
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_DO_2A SQINCP <Xdn>, <Pg>.<T>, <Wdn>
+ /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_4BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_DP_2A SQINCP <Zdn>.<T>, <Pg>.<T>
+ /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V3, REG_P3, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V4, REG_P4, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R(INS_sve_sqincp, EA_SCALABLE, REG_V5, REG_P5, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector<uint> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+ /// <summary>
+ /// int64_t svqincp[_n_s64]_b32(int64_t op, svbool_t pg)
+ /// SQINCP Xtied, Pg.S
+ ///
+ /// codegenarm64test: same SQINCP tests as the overload above.
+ /// </summary>
+ public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector<uint> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+ /// <summary>
+ /// uint32_t svqincp[_n_u32]_b32(uint32_t op, svbool_t pg)
+ /// UQINCP Wtied, Pg.S
+ ///
+ /// codegenarm64test: same UQINCP tests as the first overload above.
+ /// </summary>
+ public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector<uint> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+ /// <summary>
+ /// uint64_t svqincp[_n_u64]_b32(uint64_t op, svbool_t pg)
+ /// UQINCP Xtied, Pg.S
+ ///
+ /// codegenarm64test: same UQINCP tests as the first overload above.
+ /// </summary>
+ public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector<uint> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+ /// <summary>
+ /// svuint32_t svqincp[_u32](svuint32_t op, svbool_t pg)
+ /// UQINCP Ztied.S, Pg
+ /// MOVPRFX Zresult, Zop; UQINCP Zresult.S, Pg
+ ///
+ /// codegenarm64test: same UQINCP tests as the first overload above.
+ /// </summary>
+ public static unsafe Vector<uint> SaturatingIncrementByActiveElementCount(Vector<uint> value, Vector<uint> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+ /// <summary>
+ /// int32_t svqincp[_n_s32]_b64(int32_t op, svbool_t pg)
+ /// SQINCP Xtied, Pg.D, Wtied
+ ///
+ /// codegenarm64test: same SQINCP tests as the overload above.
+ /// </summary>
+ public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector<ulong> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+ /// <summary>
+ /// int64_t svqincp[_n_s64]_b64(int64_t op, svbool_t pg)
+ /// SQINCP Xtied, Pg.D
+ ///
+ /// codegenarm64test: same SQINCP tests as the overload above.
+ /// </summary>
+ public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector<ulong> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+ /// <summary>
+ /// uint32_t svqincp[_n_u32]_b64(uint32_t op, svbool_t pg)
+ /// UQINCP Wtied, Pg.D
+ ///
+ /// codegenarm64test: same UQINCP tests as the first overload above.
+ /// </summary>
+ public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector<ulong> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+ /// <summary>
+ /// uint64_t svqincp[_n_u64]_b64(uint64_t op, svbool_t pg)
+ /// UQINCP Xtied, Pg.D
+ ///
+ /// codegenarm64test: same UQINCP tests as the first overload above.
+ /// </summary>
+ public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector<ulong> from) => SaturatingIncrementByActiveElementCount(value, from);
+
+ /// <summary>
+ /// svuint64_t svqincp[_u64](svuint64_t op, svbool_t pg)
+ /// UQINCP Ztied.D, Pg
+ /// MOVPRFX Zresult, Zop; UQINCP Zresult.D, Pg
+ ///
+ /// codegenarm64test: same UQINCP tests as the first overload above.
+ /// </summary>
+ public static unsafe Vector<ulong> SaturatingIncrementByActiveElementCount(Vector<ulong> value, Vector<ulong> from) => SaturatingIncrementByActiveElementCount(value, from);
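A minimal usage sketch for the saturating-increment family above; it is not part of the generated file, and it assumes an SVE-capable machine where Sve.IsSupported is true and the proposed Sve.CreateTrueMaskUInt16 helper:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    uint count = 0;
    Vector<ushort> pred = Sve.CreateTrueMaskUInt16();   // every ushort lane active (illustrative)
    // UQINCP: count += number of active predicate lanes, clamping at uint.MaxValue
    // instead of wrapping on overflow.
    count = Sve.SaturatingIncrementByActiveElementCount(count, pred);
    // The vector overload applies the same saturating bump to every lane.
    Vector<ushort> totals = Sve.SaturatingIncrementByActiveElementCount(Vector<ushort>.Zero, pred);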
+
+ /// Scale : Adjust exponent
+
+ /// <summary>
+ /// svfloat32_t svscale[_f32]_m(svbool_t pg, svfloat32_t op1, svint32_t op2)
+ /// FSCALE Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ /// MOVPRFX Zresult, Zop1; FSCALE Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// svfloat32_t svscale[_f32]_x(svbool_t pg, svfloat32_t op1, svint32_t op2)
+ /// FSCALE Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ /// MOVPRFX Zresult, Zop1; FSCALE Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// svfloat32_t svscale[_f32]_z(svbool_t pg, svfloat32_t op1, svint32_t op2)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FSCALE Zresult.S, Pg/M, Zresult.S, Zop2.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HL_3A FSCALE <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_fscale, EA_SCALABLE, REG_V4, REG_P6, REG_V31, INS_OPTS_SCALABLE_H);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<float> Scale(Vector<float> left, Vector<int> right) => Scale(left, right);
+
+ /// <summary>
+ /// svfloat64_t svscale[_f64]_m(svbool_t pg, svfloat64_t op1, svint64_t op2)
+ /// FSCALE Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ /// MOVPRFX Zresult, Zop1; FSCALE Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// svfloat64_t svscale[_f64]_x(svbool_t pg, svfloat64_t op1, svint64_t op2)
+ /// FSCALE Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ /// MOVPRFX Zresult, Zop1; FSCALE Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// svfloat64_t svscale[_f64]_z(svbool_t pg, svfloat64_t op1, svint64_t op2)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FSCALE Zresult.D, Pg/M, Zresult.D, Zop2.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HL_3A FSCALE <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_fscale, EA_SCALABLE, REG_V4, REG_P6, REG_V31, INS_OPTS_SCALABLE_H);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<double> Scale(Vector<double> left, Vector<long> right) => Scale(left, right);
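A quick sketch of what Scale computes (illustrative, not from the generated file): FSCALE adjusts the exponent field directly, so each lane yields left * 2^right.

    Vector<float> left = new Vector<float>(1.5f);
    Vector<int> right = new Vector<int>(3);
    Vector<float> scaled = Sve.Scale(left, right);   // every lane: 1.5f * 2^3 = 12.0f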
+
+ /// Scatter : Non-truncating store
+
+ /// <summary>
+ /// void svst1_scatter_[s32]offset[_s32](svbool_t pg, int32_t *base, svint32_t offsets, svint32_t data)
+ /// ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]
+ /// void svst1_scatter_[s32]index[_s32](svbool_t pg, int32_t *base, svint32_t indices, svint32_t data)
+ /// ST1W Zdata.S, Pg, [Xbase, Zindices.S, SXTW #2]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JN_3C ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q);
+ /// IF_SVE_JD_4B ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JJ_4A ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_JJ_4A_B ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_JJ_4A_C ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_JJ_4A_D ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_JN_3B ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JD_4C ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JJ_4B ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, LSL #2]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JJ_4B_E ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JI_3A_A ST1W {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe void Scatter(Vector<int> mask, int* address, Vector<int> indicies, Vector<int> data) => Scatter(mask, address, indicies, data);
+
+ /// <summary>
+ /// void svst1_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data)
+ /// ST1W Zdata.S, Pg, [Zbases.S, #0]
+ ///
+ /// codegenarm64test: same ST1W tests as the overload above.
+ /// </summary>
+ public static unsafe void Scatter(Vector<int> mask, Vector<uint> addresses, Vector<int> data) => Scatter(mask, addresses, data);
+
+ /// <summary>
+ /// void svst1_scatter_[u32]offset[_s32](svbool_t pg, int32_t *base, svuint32_t offsets, svint32_t data)
+ /// ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]
+ /// void svst1_scatter_[u32]index[_s32](svbool_t pg, int32_t *base, svuint32_t indices, svint32_t data)
+ /// ST1W Zdata.S, Pg, [Xbase, Zindices.S, UXTW #2]
+ ///
+ /// codegenarm64test: same ST1W tests as the overload above.
+ /// </summary>
+ public static unsafe void Scatter(Vector<int> mask, int* address, Vector<uint> indicies, Vector<int> data) => Scatter(mask, address, indicies, data);
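A minimal sketch of the 32-bit Scatter overloads above; not from the generated file, and it assumes Sve.IsSupported plus the proposed Sve.CreateTrueMaskInt32 helper (buffer size and index values are illustrative):

    static unsafe void ScatterInts()
    {
        int n = Vector<int>.Count;
        int[] buffer = new int[2 * n];
        int[] idx = new int[n];
        for (int i = 0; i < n; i++) idx[i] = 2 * i;          // every other slot

        fixed (int* p = buffer)
        {
            Vector<int> mask = Sve.CreateTrueMaskInt32();    // all lanes active
            // ST1W with sign-extended, element-scaled indices: for each active
            // lane i this stores 42 to buffer[idx[i]].
            Sve.Scatter(mask, p, new Vector<int>(idx), new Vector<int>(42));
        }
    }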
+ /// <summary>
+ /// void svst1_scatter_[s64]offset[_s64](svbool_t pg, int64_t *base, svint64_t offsets, svint64_t data)
+ /// ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]
+ /// void svst1_scatter_[s64]index[_s64](svbool_t pg, int64_t *base, svint64_t indices, svint64_t data)
+ /// ST1D Zdata.D, Pg, [Xbase, Zindices.D, LSL #3]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JN_3C ST1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JN_3C_D ST1D {<Zt>.Q }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V2, REG_P1, REG_R0, 0, INS_OPTS_SCALABLE_Q);
+ /// IF_SVE_JJ_4A ST1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_JJ_4A_B ST1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P1, REG_R2, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P1, REG_R2, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_JD_4C ST1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V1, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JD_4C_A ST1D {<Zt>.Q }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P5, REG_R6, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JJ_4B ST1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JJ_4B_C ST1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V6, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JL_3A ST1D {<Zt>.D }, <Pg>, [<Zn>.D{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P7, REG_V4, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P7, REG_V4, 248, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe void Scatter(Vector<long> mask, long* address, Vector<long> indicies, Vector<long> data) => Scatter(mask, address, indicies, data);
+
+ /// <summary>
+ /// void svst1_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data)
+ /// ST1D Zdata.D, Pg, [Zbases.D, #0]
+ ///
+ /// codegenarm64test: same ST1D tests as the overload above.
+ /// </summary>
+ public static unsafe void Scatter(Vector<long> mask, Vector<ulong> addresses, Vector<long> data) => Scatter(mask, addresses, data);
+
+ /// <summary>
+ /// void svst1_scatter_[u64]offset[_s64](svbool_t pg, int64_t *base, svuint64_t offsets, svint64_t data)
+ /// ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]
+ /// void svst1_scatter_[u64]index[_s64](svbool_t pg, int64_t *base, svuint64_t indices, svint64_t data)
+ /// ST1D Zdata.D, Pg, [Xbase, Zindices.D, LSL #3]
+ ///
+ /// codegenarm64test: same ST1D tests as the overload above.
+ /// </summary>
+ public static unsafe void Scatter(Vector<long> mask, long* address, Vector<ulong> indicies, Vector<long> data) => Scatter(mask, address, indicies, data);
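The base-address form just listed takes a vector of complete 64-bit pointers instead of a base pointer plus indices (ST1D [Zbases.D, #0]). A hedged sketch, synthesizing one address per lane from a pinned buffer (Sve.CreateTrueMaskInt64 is the proposed helper name):

    static unsafe void ScatterByAddress()
    {
        int n = Vector<long>.Count;
        long[] buffer = new long[n];
        fixed (long* p = buffer)
        {
            ulong[] addr = new ulong[n];
            for (int i = 0; i < n; i++) addr[i] = (ulong)(p + i);   // one pointer per lane
            // Each active lane writes -1 directly to the address held in that lane.
            Sve.Scatter(Sve.CreateTrueMaskInt64(), new Vector<ulong>(addr), new Vector<long>(-1L));
        }
    }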
+ /// <summary>
+ /// void svst1_scatter_[s32]offset[_u32](svbool_t pg, uint32_t *base, svint32_t offsets, svuint32_t data)
+ /// ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]
+ /// void svst1_scatter_[s32]index[_u32](svbool_t pg, uint32_t *base, svint32_t indices, svuint32_t data)
+ /// ST1W Zdata.S, Pg, [Xbase, Zindices.S, SXTW #2]
+ ///
+ /// codegenarm64test: same ST1W tests as the first Scatter overload above.
+ /// </summary>
+ public static unsafe void Scatter(Vector<uint> mask, uint* address, Vector<int> indicies, Vector<uint> data) => Scatter(mask, address, indicies, data);
+
+ /// <summary>
+ /// void svst1_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data)
+ /// ST1W Zdata.S, Pg, [Zbases.S, #0]
+ ///
+ /// codegenarm64test: same ST1W tests as the first Scatter overload above.
+ /// </summary>
+ public static unsafe void Scatter(Vector<uint> mask, Vector<uint> addresses, Vector<uint> data) => Scatter(mask, addresses, data);
+
+ /// <summary>
+ /// void svst1_scatter_[u32]offset[_u32](svbool_t pg, uint32_t *base, svuint32_t offsets, svuint32_t data)
+ /// ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]
+ /// void svst1_scatter_[u32]index[_u32](svbool_t pg, uint32_t *base, svuint32_t indices, svuint32_t data)
+ /// ST1W Zdata.S, Pg, [Xbase, Zindices.S, UXTW #2]
+ ///
+ /// codegenarm64test: same ST1W tests as the first Scatter overload above.
+ /// </summary>
+ public static unsafe void Scatter(Vector<uint> mask, uint* address, Vector<uint> indicies, Vector<uint> data) => Scatter(mask, address, indicies, data);
+
+ /// <summary>
+ /// void svst1_scatter_[s64]offset[_u64](svbool_t pg, uint64_t *base, svint64_t offsets, svuint64_t data)
+ /// ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]
+ /// void svst1_scatter_[s64]index[_u64](svbool_t pg, uint64_t *base, svint64_t indices, svuint64_t data)
+ /// ST1D Zdata.D, Pg, [Xbase, Zindices.D, LSL #3]
+ ///
+ /// codegenarm64test: same ST1D tests as the first 64-bit Scatter overload above.
+ /// </summary>
+ public static unsafe void Scatter(Vector<ulong> mask, ulong* address, Vector<long> indicies, Vector<ulong> data) => Scatter(mask, address, indicies, data);
+
+ /// <summary>
+ /// void svst1_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data)
+ /// ST1D Zdata.D, Pg, [Zbases.D, #0]
+ ///
+ /// codegenarm64test: same ST1D tests as the first 64-bit Scatter overload above.
+ /// </summary>
+ public static unsafe void Scatter(Vector<ulong> mask, Vector<ulong> addresses, Vector<ulong> data) => Scatter(mask, addresses, data);
+
+ /// <summary>
+ /// void svst1_scatter_[u64]offset[_u64](svbool_t pg, uint64_t *base, svuint64_t offsets, svuint64_t data)
+ /// ST1D Zdata.D, Pg, [Xbase, Zoffsets.D]
+ /// void svst1_scatter_[u64]index[_u64](svbool_t pg, uint64_t *base, svuint64_t indices, svuint64_t data)
+ /// ST1D Zdata.D, Pg, [Xbase, Zindices.D, LSL #3]
+ ///
+ /// codegenarm64test: same ST1D tests as the first 64-bit Scatter overload above.
+ /// </summary>
+ public static unsafe void Scatter(Vector<ulong> mask, ulong* address, Vector<ulong> indicies, Vector<ulong> data) => Scatter(mask, address, indicies, data);
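For the unsigned 64-bit forms above, the index vector is used without extension and scaled by the element size (LSL #3), so an index selects a ulong slot rather than a byte offset. A small sketch under the same assumptions (Sve.CreateTrueMaskUInt64 is the proposed helper name):

    static unsafe void ScatterUlongs(ulong* p, Vector<ulong> idx, Vector<ulong> data)
    {
        // Per active lane i: p[idx[i]] = data[i].
        Sve.Scatter(Sve.CreateTrueMaskUInt64(), p, idx, data);
    }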
+ /// <summary>
+ /// void svst1_scatter_[s32]offset[_f32](svbool_t pg, float32_t *base, svint32_t offsets, svfloat32_t data)
+ /// ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]
+ /// void svst1_scatter_[s32]index[_f32](svbool_t pg, float32_t *base, svint32_t indices, svfloat32_t data)
+ /// ST1W Zdata.S, Pg, [Xbase, Zindices.S, SXTW #2]
+ ///
+ /// codegenarm64test: same ST1W tests as the first Scatter overload above.
+ /// </summary>
+ public static unsafe void Scatter(Vector<float> mask, float* address, Vector<int> indicies, Vector<float> data) => Scatter(mask, address, indicies, data);
+
+ /// <summary>
+ /// void svst1_scatter[_u32base_f32](svbool_t pg, svuint32_t bases, svfloat32_t data)
+ /// ST1W Zdata.S, Pg, [Zbases.S, #0]
+ ///
+ /// codegenarm64test: same ST1W tests as the first Scatter overload above.
+ /// </summary>
+ public static unsafe void Scatter(Vector<float> mask, Vector<uint> addresses, Vector<float> data) => Scatter(mask, addresses, data);
+
+ /// <summary>
+ /// void svst1_scatter_[u32]offset[_f32](svbool_t pg, float32_t *base, svuint32_t offsets, svfloat32_t data)
+ /// ST1W Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]
+ /// void svst1_scatter_[u32]index[_f32](svbool_t pg, float32_t *base, svuint32_t indices, svfloat32_t data)
+ /// ST1W Zdata.S, Pg, [Xbase, Zindices.S, UXTW #2]
+ ///
+ /// codegenarm64test: same ST1W tests as the first Scatter overload above.
+ /// </summary>
+ public static unsafe void Scatter(Vector<float> mask, float* address, Vector<uint> indicies, Vector<float> data) => Scatter(mask, address, indicies, data);
INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1D {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P1, REG_R2, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P1, REG_R2, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JD_4C ST1D {.D }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V1, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JD_4C_A ST1D {.Q }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P5, REG_R6, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B ST1D {.D }, , [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_C ST1D {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V6, INS_OPTS_SCALABLE_D); + /// IF_SVE_JL_3A ST1D {.D }, , [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P7, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P7, REG_V4, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe void Scatter(Vector mask, double* address, Vector indicies, Vector data) => Scatter(mask, address, indicies, data); + + /// + /// void svst1_scatter[_u64base_f64](svbool_t pg, svuint64_t bases, svfloat64_t data) + /// ST1D Zdata.D, Pg, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_JN_3C ST1D {.D }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_JN_3C_D ST1D {.Q }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V2, REG_P1, REG_R0, 0, INS_OPTS_SCALABLE_Q); + /// IF_SVE_JJ_4A ST1D {.D }, , [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1D {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P1, REG_R2, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P1, REG_R2, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JD_4C ST1D {.D }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V1, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JD_4C_A ST1D {.Q }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P5, REG_R6, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B ST1D {.D }, , [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_C ST1D {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V6, INS_OPTS_SCALABLE_D); + /// IF_SVE_JL_3A ST1D {.D }, , [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P7, REG_V4, 0, 
INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P7, REG_V4, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data) => Scatter(mask, addresses, data); + + /// + /// void svst1_scatter_[u64]offset[_f64](svbool_t pg, float64_t *base, svuint64_t offsets, svfloat64_t data) + /// ST1D Zdata.D, Pg, [Xbase, Zoffsets.D] + /// void svst1_scatter_[u64]index[_f64](svbool_t pg, float64_t *base, svuint64_t indices, svfloat64_t data) + /// ST1D Zdata.D, Pg, [Xbase, Zindices.D, LSL #3] + /// + /// codegenarm64test: + /// IF_SVE_JN_3C ST1D {.D }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_JN_3C_D ST1D {.Q }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V2, REG_P1, REG_R0, 0, INS_OPTS_SCALABLE_Q); + /// IF_SVE_JJ_4A ST1D {.D }, , [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1D {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P1, REG_R2, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P1, REG_R2, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JD_4C ST1D {.D }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V1, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JD_4C_A ST1D {.Q }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P5, REG_R6, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B ST1D {.D }, , [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_C ST1D {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V6, INS_OPTS_SCALABLE_D); + /// IF_SVE_JL_3A ST1D {.D }, , [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P7, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P7, REG_V4, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe void Scatter(Vector mask, double* address, Vector indicies, Vector data) => Scatter(mask, address, indicies, data); + + + /// Scatter16BitNarrowing : Truncate to 16 bits and store + + /// + /// void svst1h_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) + /// ST1H Zdata.S, Pg, [Zbases.S, #0] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1H {.}, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P6, REG_R1, REG_R2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1H {.S }, , [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, 
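+
+ // Usage sketch for the Scatter overloads above: stores one strided vector of
+ // floats. A minimal sketch, not part of the generated surface; it assumes an
+ // SVE-capable arm64 host, and CreateTrueMaskSingle / SveMaskPattern are
+ // assumed spellings from the wider Sve proposal. Buffer size and stride are
+ // illustrative only.
+ private static unsafe void ScatterUsageSketch()
+ {
+     if (!Sve.IsSupported)
+     {
+         return; // a real caller would fall back to a scalar loop here
+     }
+
+     float* buffer = stackalloc float[4 * Vector<float>.Count];
+
+     int[] idx = new int[Vector<int>.Count];
+     for (int i = 0; i < idx.Length; i++)
+     {
+         idx[i] = 3 * i; // strided element indices, all within 'buffer'
+     }
+
+     Vector<float> mask = Sve.CreateTrueMaskSingle(SveMaskPattern.All);
+     Vector<int> indices = new Vector<int>(idx);
+     Vector<float> data = new Vector<float>(1.5f);
+
+     // Each active lane stores data[lane] to buffer[idx[lane]]; the index is
+     // scaled by sizeof(float), i.e. ST1W Zdata.S, Pg, [Xbase, Zindices.S, SXTW #2].
+     Scatter(mask, buffer, indices, data);
+ }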
+
+ /// Scatter16BitNarrowing : Truncate to 16 bits and store
+
+ /// <summary>
+ /// void svst1h_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data)
+ ///   ST1H Zdata.S, Pg, [Zbases.S, #0]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JD_4A   ST1H {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P6, REG_R1, REG_R2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JJ_4A   ST1H {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_JJ_4A_B ST1H {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_JJ_4A_C ST1H {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_JJ_4A_D ST1H {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_JN_3A   ST1H {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 3, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, -2, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JJ_4B   ST1H {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JJ_4B_E ST1H {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JI_3A_A ST1H {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe void Scatter16BitNarrowing(Vector<int> mask, Vector<uint> addresses, Vector<int> data) => Scatter16BitNarrowing(mask, addresses, data);
+
+ /// <summary>
+ /// void svst1h_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data)
+ ///   ST1H Zdata.D, Pg, [Zbases.D, #0]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitNarrowing(Vector<long> mask, Vector<ulong> addresses, Vector<long> data) => Scatter16BitNarrowing(mask, addresses, data);
+
+ /// <summary>
+ /// void svst1h_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data)
+ ///   ST1H Zdata.S, Pg, [Zbases.S, #0]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitNarrowing(Vector<uint> mask, Vector<uint> addresses, Vector<uint> data) => Scatter16BitNarrowing(mask, addresses, data);
+
+ /// <summary>
+ /// void svst1h_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data)
+ ///   ST1H Zdata.D, Pg, [Zbases.D, #0]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitNarrowing(Vector<ulong> mask, Vector<ulong> addresses, Vector<ulong> data) => Scatter16BitNarrowing(mask, addresses, data);
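+
+ // Usage sketch for the vector-of-addresses form above: each active lane of
+ // 'data' is truncated to its low 16 bits and stored to the byte address held
+ // in the matching lane of 'addresses' (ST1H Zdata.D, Pg, [Zbases.D, #0]).
+ // A minimal sketch, not generated code; CreateTrueMaskInt64 / SveMaskPattern
+ // are assumed spellings from the wider Sve surface, and the address setup is
+ // illustrative only.
+ private static unsafe void Scatter16BitNarrowingUsageSketch()
+ {
+     if (!Sve.IsSupported)
+     {
+         return;
+     }
+
+     ushort* slots = stackalloc ushort[Vector<ulong>.Count];
+
+     ulong[] addr = new ulong[Vector<ulong>.Count];
+     for (int i = 0; i < addr.Length; i++)
+     {
+         addr[i] = (ulong)(slots + i); // one ushort slot per lane
+     }
+
+     Vector<long> mask = Sve.CreateTrueMaskInt64(SveMaskPattern.All);
+     Vector<ulong> addresses = new Vector<ulong>(addr);
+     Vector<long> data = new Vector<long>(0x1234);
+
+     Scatter16BitNarrowing(mask, addresses, data); // low 16 bits of each lane land in 'slots'
+ }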
+
+ /// Scatter16BitWithByteOffsetsNarrowing : Truncate to 16 bits and store
+
+ /// <summary>
+ /// void svst1h_scatter_[s32]offset[_s32](svbool_t pg, int16_t *base, svint32_t offsets, svint32_t data)
+ ///   ST1H Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<int> mask, short* address, Vector<int> offsets, Vector<int> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+ /// <summary>
+ /// void svst1h_scatter_[u32]offset[_s32](svbool_t pg, int16_t *base, svuint32_t offsets, svint32_t data)
+ ///   ST1H Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<int> mask, short* address, Vector<uint> offsets, Vector<int> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+ /// <summary>
+ /// void svst1h_scatter_[s32]index[_s32](svbool_t pg, int16_t *base, svint32_t indices, svint32_t data)
+ ///   ST1H Zdata.S, Pg, [Xbase, Zindices.S, SXTW #1]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<int> mask, short* address, Vector<int> indices, Vector<int> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data);
+
+ /// <summary>
+ /// void svst1h_scatter_[u32]index[_s32](svbool_t pg, int16_t *base, svuint32_t indices, svint32_t data)
+ ///   ST1H Zdata.S, Pg, [Xbase, Zindices.S, UXTW #1]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<int> mask, short* address, Vector<uint> indices, Vector<int> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data);
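+
+ // Usage sketch for the 32-bit forms above (the 64-bit forms below behave the
+ // same way with long/ulong lanes): the 'offsets' overloads take raw byte
+ // offsets from 'address' with no scaling (ST1H ... Zoffsets.S, SXTW), while
+ // the 'indices' overloads take element numbers that the instruction scales
+ // by sizeof(short) (ST1H ... Zindices.S, SXTW #1). A minimal sketch, not
+ // generated code; CreateTrueMaskInt32 / SveMaskPattern are assumed spellings
+ // from the wider Sve surface.
+ private static unsafe void Scatter16BitWithByteOffsetsNarrowingUsageSketch()
+ {
+     if (!Sve.IsSupported)
+     {
+         return;
+     }
+
+     short* basePtr = stackalloc short[2 * Vector<int>.Count];
+
+     int[] byteOffsets = new int[Vector<int>.Count];
+     for (int i = 0; i < byteOffsets.Length; i++)
+     {
+         byteOffsets[i] = 2 * i * sizeof(short); // every other slot, as byte offsets
+     }
+
+     Vector<int> mask = Sve.CreateTrueMaskInt32(SveMaskPattern.All);
+     Vector<int> data = new Vector<int>(0x7FFF);
+
+     // Low 16 bits of each active lane go to (byte*)basePtr + byteOffsets[lane];
+     // the index overloads would reach the same slots with these values divided
+     // by sizeof(short).
+     Scatter16BitWithByteOffsetsNarrowing(mask, basePtr, new Vector<int>(byteOffsets), data);
+ }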
+
+ /// <summary>
+ /// void svst1h_scatter_[s64]offset[_s64](svbool_t pg, int16_t *base, svint64_t offsets, svint64_t data)
+ ///   ST1H Zdata.D, Pg, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<long> mask, short* address, Vector<long> offsets, Vector<long> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+ /// <summary>
+ /// void svst1h_scatter_[u64]offset[_s64](svbool_t pg, int16_t *base, svuint64_t offsets, svint64_t data)
+ ///   ST1H Zdata.D, Pg, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<long> mask, short* address, Vector<ulong> offsets, Vector<long> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+ /// <summary>
+ /// void svst1h_scatter_[s64]index[_s64](svbool_t pg, int16_t *base, svint64_t indices, svint64_t data)
+ ///   ST1H Zdata.D, Pg, [Xbase, Zindices.D, LSL #1]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<long> mask, short* address, Vector<long> indices, Vector<long> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data);
+
+ /// <summary>
+ /// void svst1h_scatter_[u64]index[_s64](svbool_t pg, int16_t *base, svuint64_t indices, svint64_t data)
+ ///   ST1H Zdata.D, Pg, [Xbase, Zindices.D, LSL #1]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<long> mask, short* address, Vector<ulong> indices, Vector<long> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data);
+
+ /// <summary>
+ /// void svst1h_scatter_[s32]offset[_u32](svbool_t pg, uint16_t *base, svint32_t offsets, svuint32_t data)
+ ///   ST1H Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<uint> mask, ushort* address, Vector<int> offsets, Vector<uint> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+ /// <summary>
+ /// void svst1h_scatter_[u32]offset[_u32](svbool_t pg, uint16_t *base, svuint32_t offsets, svuint32_t data)
+ ///   ST1H Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<uint> mask, ushort* address, Vector<uint> offsets, Vector<uint> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+ /// <summary>
+ /// void svst1h_scatter_[s32]index[_u32](svbool_t pg, uint16_t *base, svint32_t indices, svuint32_t data)
+ ///   ST1H Zdata.S, Pg, [Xbase, Zindices.S, SXTW #1]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<uint> mask, ushort* address, Vector<int> indices, Vector<uint> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data);
+
+ /// <summary>
+ /// void svst1h_scatter_[u32]index[_u32](svbool_t pg, uint16_t *base, svuint32_t indices, svuint32_t data)
+ ///   ST1H Zdata.S, Pg, [Xbase, Zindices.S, UXTW #1]
+ ///
+ /// codegenarm64test: identical INS_sve_st1h coverage to the list above.
+ /// </summary>
+ public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector<uint> mask, ushort* address, Vector<uint> indices, Vector<uint> data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data);
+
+ /// <summary>
+ /// void svst1h_scatter_[s64]offset[_u64](svbool_t pg, uint16_t *base, svint64_t offsets, svuint64_t data)
+ ///   ST1H Zdata.D, Pg, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JD_4A   ST1H {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P6, REG_R1, REG_R2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JJ_4A   ST1H {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_JJ_4A_B ST1H {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_JJ_4A_C ST1H {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1,
REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JJ_4A_D ST1H {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1H {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, -2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JJ_4B ST1H {.D }, , [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_E ST1H {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1H {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_D); + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svst1h_scatter_[s64]index[_u64](svbool_t pg, uint16_t *base, svint64_t indices, svuint64_t data) + /// ST1H Zdata.D, Pg, [Xbase, Zindices.D, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1H {.}, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P6, REG_R1, REG_R2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1H {.S }, , [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1H {.D }, , [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1H {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JJ_4A_D ST1H {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1H {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, -2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JJ_4B ST1H {.D }, , [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_E ST1H {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1H {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_D); + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svst1h_scatter_[u64]index[_u64](svbool_t pg, uint16_t *base, svuint64_t indices, svuint64_t data) + /// ST1H Zdata.D, Pg, [Xbase, Zindices.D, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1H {.}, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P6, REG_R1, REG_R2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1H {.S }, , [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1H {.D }, , [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1H {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JJ_4A_D ST1H {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1H {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, 
REG_R4, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, -2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JJ_4B ST1H {.D }, , [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_E ST1H {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1H {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_D); + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + + /// Scatter32BitNarrowing : Truncate to 32 bits and store + + /// + /// void svst1w_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// ST1W Zdata.D, Pg, [Zbases.D, #0] + /// + /// codegenarm64test: + /// IF_SVE_JN_3C ST1W {.Q }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q); + /// IF_SVE_JD_4B ST1W {.}, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1W {.S }, , [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1W {.D }, , [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1W {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JJ_4A_D ST1W {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3B ST1W {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_JD_4C ST1W {.Q }, , [, , LSL #2] + /// 
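+
+        // Usage sketch (illustrative only, not part of the generated API surface above): truncate
+        // each 32-bit lane to its low 16 bits and scatter it to address + offsets[i] bytes. The
+        // helper name StoreLow16 is hypothetical; CreateTrueMaskUInt32 is assumed to be the
+        // all-lanes-true predicate helper declared elsewhere in this API.
+        private static unsafe void StoreLow16(ushort* address, Vector<uint> offsets, Vector<uint> values)
+        {
+            // Every element participates in the scatter.
+            Vector<uint> mask = Sve.CreateTrueMaskUInt32(SveMaskPattern.All);
+            // Each 32-bit lane of 'values' is narrowed to 16 bits before the store (ST1H).
+            Sve.Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, values);
+        }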
+
+        ///  Scatter32BitNarrowing : Truncate to 32 bits and store
+
+        /// <summary>
+        /// void svst1w_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data)
+        ///   ST1W Zdata.D, Pg, [Zbases.D, #0]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_JN_3C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q);
+        ///    IF_SVE_JD_4B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4A   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_C   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_JJ_4A_D   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_JN_3B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JD_4C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B_E   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JI_3A_A   ST1W {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe void Scatter32BitNarrowing(Vector<long> mask, Vector<ulong> addresses, Vector<long> data) => Scatter32BitNarrowing(mask, addresses, data);
+
+        /// <summary>
+        /// void svst1w_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data)
+        ///   ST1W Zdata.D, Pg, [Zbases.D, #0]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_JN_3C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q);
+        ///    IF_SVE_JD_4B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4A   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_C   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_JJ_4A_D   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_JN_3B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JD_4C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B_E   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JI_3A_A   ST1W {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe void Scatter32BitNarrowing(Vector<ulong> mask, Vector<ulong> addresses, Vector<ulong> data) => Scatter32BitNarrowing(mask, addresses, data);
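+
+        // Usage sketch (illustrative only): scatter the low 32 bits of each 64-bit lane to an
+        // absolute byte address held in the corresponding lane of 'addresses'. The helper name
+        // StoreLow32At is hypothetical; CreateTrueMaskUInt64 is assumed from this same API.
+        private static unsafe void StoreLow32At(Vector<ulong> addresses, Vector<ulong> values)
+        {
+            Vector<ulong> mask = Sve.CreateTrueMaskUInt64(SveMaskPattern.All);
+            // ST1W Zdata.D, Pg, [Zbases.D, #0]: each active lane writes 4 bytes at addresses[i].
+            Sve.Scatter32BitNarrowing(mask, addresses, values);
+        }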
+
+        ///  Scatter32BitWithByteOffsetsNarrowing : Truncate to 32 bits and store
+
+        /// <summary>
+        /// void svst1w_scatter_[s64]offset[_s64](svbool_t pg, int32_t *base, svint64_t offsets, svint64_t data)
+        ///   ST1W Zdata.D, Pg, [Xbase, Zoffsets.D]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_JN_3C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q);
+        ///    IF_SVE_JD_4B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4A   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_C   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_JJ_4A_D   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_JN_3B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JD_4C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B_E   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JI_3A_A   ST1W {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<long> mask, int* address, Vector<long> offsets, Vector<long> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+        /// <summary>
+        /// void svst1w_scatter_[u64]offset[_s64](svbool_t pg, int32_t *base, svuint64_t offsets, svint64_t data)
+        ///   ST1W Zdata.D, Pg, [Xbase, Zoffsets.D]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_JN_3C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q);
+        ///    IF_SVE_JD_4B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4A   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_C   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_JJ_4A_D   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_JN_3B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JD_4C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B_E   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JI_3A_A   ST1W {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<long> mask, int* address, Vector<ulong> offsets, Vector<long> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+        /// <summary>
+        /// void svst1w_scatter_[s64]index[_s64](svbool_t pg, int32_t *base, svint64_t indices, svint64_t data)
+        ///   ST1W Zdata.D, Pg, [Xbase, Zindices.D, LSL #2]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_JN_3C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q);
+        ///    IF_SVE_JD_4B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4A   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_C   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_JJ_4A_D   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_JN_3B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JD_4C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B_E   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JI_3A_A   ST1W {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<long> mask, int* address, Vector<long> indices, Vector<long> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1w_scatter_[u64]index[_s64](svbool_t pg, int32_t *base, svuint64_t indices, svint64_t data)
+        ///   ST1W Zdata.D, Pg, [Xbase, Zindices.D, LSL #2]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_JN_3C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q);
+        ///    IF_SVE_JD_4B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4A   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_C   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_JJ_4A_D   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_JN_3B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JD_4C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B_E   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JI_3A_A   ST1W {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<long> mask, int* address, Vector<ulong> indices, Vector<long> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1w_scatter_[s64]offset[_u64](svbool_t pg, uint32_t *base, svint64_t offsets, svuint64_t data)
+        ///   ST1W Zdata.D, Pg, [Xbase, Zoffsets.D]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_JN_3C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q);
+        ///    IF_SVE_JD_4B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4A   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_C   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_JJ_4A_D   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_JN_3B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JD_4C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B_E   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JI_3A_A   ST1W {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<ulong> mask, uint* address, Vector<long> offsets, Vector<ulong> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+        /// <summary>
+        /// void svst1w_scatter_[u64]offset[_u64](svbool_t pg, uint32_t *base, svuint64_t offsets, svuint64_t data)
+        ///   ST1W Zdata.D, Pg, [Xbase, Zoffsets.D]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_JN_3C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q);
+        ///    IF_SVE_JD_4B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4A   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_C   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_JJ_4A_D   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_JN_3B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JD_4C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B_E   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JI_3A_A   ST1W {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<ulong> mask, uint* address, Vector<ulong> offsets, Vector<ulong> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+        /// <summary>
+        /// void svst1w_scatter_[s64]index[_u64](svbool_t pg, uint32_t *base, svint64_t indices, svuint64_t data)
+        ///   ST1W Zdata.D, Pg, [Xbase, Zindices.D, LSL #2]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_JN_3C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q);
+        ///    IF_SVE_JD_4B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4A   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_C   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_JJ_4A_D   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_JN_3B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JD_4C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B_E   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JI_3A_A   ST1W {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<ulong> mask, uint* address, Vector<long> indices, Vector<ulong> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data);
+
+        /// <summary>
+        /// void svst1w_scatter_[u64]index[_u64](svbool_t pg, uint32_t *base, svuint64_t indices, svuint64_t data)
+        ///   ST1W Zdata.D, Pg, [Xbase, Zindices.D, LSL #2]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_JN_3C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q);
+        ///    IF_SVE_JD_4B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4A   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+        ///    IF_SVE_JJ_4A_C   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_JJ_4A_D   ST1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_JN_3B   ST1W {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JD_4C   ST1W {<Zt>.Q }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_JJ_4B_E   ST1W {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JI_3A_A   ST1W {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<ulong> mask, uint* address, Vector<ulong> indices, Vector<ulong> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data);
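+
+        // Usage sketch (illustrative only): with the index overloads the per-lane index is scaled
+        // by the memory element size, so lane i lands at address + indices[i] * sizeof(uint)
+        // (ST1W ... LSL #2). The helper name StoreLow32 is hypothetical.
+        private static unsafe void StoreLow32(uint* address, Vector<ulong> indices, Vector<ulong> values)
+        {
+            Vector<ulong> mask = Sve.CreateTrueMaskUInt64(SveMaskPattern.All);
+            // Each 64-bit lane of 'values' is truncated to 32 bits before the store.
+            Sve.Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, values);
+        }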
+
+        ///  Scatter8BitNarrowing : Truncate to 8 bits and store
+
+        /// <summary>
+        /// void svst1b_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data)
+        ///   ST1B Zdata.S, Pg, [Zbases.S, #0]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_JD_4A   ST1B {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_R0, INS_OPTS_SCALABLE_B);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_R2, INS_OPTS_SCALABLE_H);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P5, REG_R7, REG_R4, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P0, REG_R1, REG_R2, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JK_4A   ST1B {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_JK_4A_B   ST1B {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_JN_3A   ST1B {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 5, INS_OPTS_SCALABLE_B);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 4, INS_OPTS_SCALABLE_H);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 2, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 0, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JK_4B   ST1B {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P3, REG_R0, REG_V4, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JI_3A_A   ST1B {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe void Scatter8BitNarrowing(Vector<int> mask, Vector<uint> addresses, Vector<int> data) => Scatter8BitNarrowing(mask, addresses, data);
+
+        /// <summary>
+        /// void svst1b_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data)
+        ///   ST1B Zdata.D, Pg, [Zbases.D, #0]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_JD_4A   ST1B {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_R0, INS_OPTS_SCALABLE_B);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_R2, INS_OPTS_SCALABLE_H);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P5, REG_R7, REG_R4, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P0, REG_R1, REG_R2, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JK_4A   ST1B {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_JK_4A_B   ST1B {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_JN_3A   ST1B {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 5, INS_OPTS_SCALABLE_B);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 4, INS_OPTS_SCALABLE_H);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 2, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 0, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JK_4B   ST1B {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P3, REG_R0, REG_V4, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JI_3A_A   ST1B {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe void Scatter8BitNarrowing(Vector<long> mask, Vector<ulong> addresses, Vector<long> data) => Scatter8BitNarrowing(mask, addresses, data);
+
+        /// <summary>
+        /// void svst1b_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data)
+        ///   ST1B Zdata.S, Pg, [Zbases.S, #0]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_JD_4A   ST1B {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_R0, INS_OPTS_SCALABLE_B);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_R2, INS_OPTS_SCALABLE_H);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P5, REG_R7, REG_R4, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P0, REG_R1, REG_R2, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JK_4A   ST1B {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_JK_4A_B   ST1B {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_JN_3A   ST1B {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 5, INS_OPTS_SCALABLE_B);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 4, INS_OPTS_SCALABLE_H);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 2, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 0, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JK_4B   ST1B {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P3, REG_R0, REG_V4, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JI_3A_A   ST1B {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe void Scatter8BitNarrowing(Vector<uint> mask, Vector<uint> addresses, Vector<uint> data) => Scatter8BitNarrowing(mask, addresses, data);
+
+        /// <summary>
+        /// void svst1b_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data)
+        ///   ST1B Zdata.D, Pg, [Zbases.D, #0]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_JD_4A   ST1B {<Zt>.<T>}, <Pg>, [<Xn|SP>, <Xm>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_R0, INS_OPTS_SCALABLE_B);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_R2, INS_OPTS_SCALABLE_H);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P5, REG_R7, REG_R4, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P0, REG_R1, REG_R2, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JK_4A   ST1B {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW);
+        ///    IF_SVE_JK_4A_B   ST1B {<Zt>.S }, <Pg>, [<Xn|SP>, <Zm>.S, <mod>]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_UXTW);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_SXTW);
+        ///    IF_SVE_JN_3A   ST1B {<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 5, INS_OPTS_SCALABLE_B);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 4, INS_OPTS_SCALABLE_H);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 2, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 0, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JK_4B   ST1B {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P3, REG_R0, REG_V4, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_JI_3A_A   ST1B {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_S);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe void Scatter8BitNarrowing(Vector<ulong> mask, Vector<ulong> addresses, Vector<ulong> data) => Scatter8BitNarrowing(mask, addresses, data);
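+
+        // Usage sketch (illustrative only): only lanes whose mask element is non-zero are stored,
+        // so a conditional byte-narrowing scatter needs no scalar fallback loop. Building the
+        // mask from a comparison is assumed to happen in the caller; the helper name is
+        // hypothetical.
+        private static unsafe void StoreLow8Where(Vector<uint> mask, Vector<uint> addresses, Vector<uint> values)
+        {
+            // Inactive lanes leave memory untouched; active lanes write the low byte of values[i].
+            Sve.Scatter8BitNarrowing(mask, addresses, values);
+        }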
+
+
+ /// Scatter8BitWithByteOffsetsNarrowing : Truncate to 8 bits and store
+
+ /// <summary>
+ /// void svst1b_scatter_[s32]offset[_s32](svbool_t pg, int8_t *base, svint32_t offsets, svint32_t data)
+ ///   ST1B Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]
+ ///
+ /// codegenarm64test: same ST1B cases as the Scatter8BitNarrowing overloads above.
+ /// </summary>
+ public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector<int> mask, sbyte* address, Vector<int> offsets, Vector<int> data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+ /// <summary>
+ /// void svst1b_scatter_[u32]offset[_s32](svbool_t pg, int8_t *base, svuint32_t offsets, svint32_t data)
+ ///   ST1B Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]
+ ///
+ /// codegenarm64test: same ST1B cases as the Scatter8BitNarrowing overloads above.
+ /// </summary>
+ public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector<int> mask, sbyte* address, Vector<uint> offsets, Vector<int> data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+ /// <summary>
+ /// void svst1b_scatter_[s64]offset[_s64](svbool_t pg, int8_t *base, svint64_t offsets, svint64_t data)
+ ///   ST1B Zdata.D, Pg, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: same ST1B cases as the Scatter8BitNarrowing overloads above.
+ /// </summary>
+ public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector<long> mask, sbyte* address, Vector<long> offsets, Vector<long> data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+ /// <summary>
+ /// void svst1b_scatter_[u64]offset[_s64](svbool_t pg, int8_t *base, svuint64_t offsets, svint64_t data)
+ ///   ST1B Zdata.D, Pg, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: same ST1B cases as the Scatter8BitNarrowing overloads above.
+ /// </summary>
+ public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector<long> mask, sbyte* address, Vector<ulong> offsets, Vector<long> data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+ /// <summary>
+ /// void svst1b_scatter_[s32]offset[_u32](svbool_t pg, uint8_t *base, svint32_t offsets, svuint32_t data)
+ ///   ST1B Zdata.S, Pg, [Xbase, Zoffsets.S, SXTW]
+ ///
+ /// codegenarm64test: same ST1B cases as the Scatter8BitNarrowing overloads above.
+ /// </summary>
+ public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector<uint> mask, byte* address, Vector<int> offsets, Vector<uint> data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+ /// <summary>
+ /// void svst1b_scatter_[u32]offset[_u32](svbool_t pg, uint8_t *base, svuint32_t offsets, svuint32_t data)
+ ///   ST1B Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]
+ ///
+ /// codegenarm64test: same ST1B cases as the Scatter8BitNarrowing overloads above.
+ /// </summary>
+ public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector<uint> mask, byte* address, Vector<uint> offsets, Vector<uint> data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+ /// <summary>
+ /// void svst1b_scatter_[s64]offset[_u64](svbool_t pg, uint8_t *base, svint64_t offsets, svuint64_t data)
+ ///   ST1B Zdata.D, Pg, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: same ST1B cases as the Scatter8BitNarrowing overloads above.
+ /// </summary>
+ public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector<ulong> mask, byte* address, Vector<long> offsets, Vector<ulong> data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+ /// <summary>
+ /// void svst1b_scatter_[u64]offset[_u64](svbool_t pg, uint8_t *base, svuint64_t offsets, svuint64_t data)
+ ///   ST1B Zdata.D, Pg, [Xbase, Zoffsets.D]
+ ///
+ /// codegenarm64test: same ST1B cases as the Scatter8BitNarrowing overloads above.
+ /// </summary>
+ public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector<ulong> mask, byte* address, Vector<ulong> offsets, Vector<ulong> data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
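+
+ // --- Example (editor's addition; hypothetical, not part of the generated API) ---
+ // The base+offsets form above: for each active lane i, dest[offsets[i]] receives
+ // the low byte of data[i] (ST1B Zdata.S, Pg, [Xbase, Zoffsets.S, UXTW]).
+ // CreateTrueMaskUInt32 is an assumed helper from the same proposal.
+ private static unsafe void Example_Scatter8BitWithByteOffsets(byte* dest, Vector<uint> offsets, Vector<uint> data)
+ {
+     Vector<uint> mask = Sve.CreateTrueMaskUInt32();
+     Sve.Scatter8BitWithByteOffsetsNarrowing(mask, dest, offsets, data);
+ }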
+
+
+ /// SetFfr : Write to the first-fault register
+
+ /// <summary>
+ /// void svwrffr(svbool_t op)
+ ///   WRFFR Pop.B
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_DR_1A   WRFFR <Pn>.B
+ ///        theEmitter->emitIns_R(INS_sve_wrffr, EA_SCALABLE, REG_P0);
+ /// </summary>
+ public static unsafe void SetFfr(Vector<sbyte> value) => SetFfr(value);
+
+ /// <summary>
+ /// void svwrffr(svbool_t op)
+ ///   WRFFR Pop.B
+ ///
+ /// codegenarm64test: same WRFFR case as above.
+ /// </summary>
+ public static unsafe void SetFfr(Vector<short> value) => SetFfr(value);
+
+ /// <summary>
+ /// void svwrffr(svbool_t op)
+ ///   WRFFR Pop.B
+ ///
+ /// codegenarm64test: same WRFFR case as above.
+ /// </summary>
+ public static unsafe void SetFfr(Vector<int> value) => SetFfr(value);
+
+ /// <summary>
+ /// void svwrffr(svbool_t op)
+ ///   WRFFR Pop.B
+ ///
+ /// codegenarm64test: same WRFFR case as above.
+ /// </summary>
+ public static unsafe void SetFfr(Vector<long> value) => SetFfr(value);
+
+ /// <summary>
+ /// void svwrffr(svbool_t op)
+ ///   WRFFR Pop.B
+ ///
+ /// codegenarm64test: same WRFFR case as above.
+ /// </summary>
+ public static unsafe void SetFfr(Vector<byte> value) => SetFfr(value);
+
+ /// <summary>
+ /// void svwrffr(svbool_t op)
+ ///   WRFFR Pop.B
+ ///
+ /// codegenarm64test: same WRFFR case as above.
+ /// </summary>
+ public static unsafe void SetFfr(Vector<ushort> value) => SetFfr(value);
+
+ /// <summary>
+ /// void svwrffr(svbool_t op)
+ ///   WRFFR Pop.B
+ ///
+ /// codegenarm64test: same WRFFR case as above.
+ /// </summary>
+ public static unsafe void SetFfr(Vector<uint> value) => SetFfr(value);
+
+ /// <summary>
+ /// void svwrffr(svbool_t op)
+ ///   WRFFR Pop.B
+ ///
+ /// codegenarm64test: same WRFFR case as above.
+ /// </summary>
+ public static unsafe void SetFfr(Vector<ulong> value) => SetFfr(value);
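+
+ // --- Example (editor's addition; hypothetical, not part of the generated API) ---
+ // Typical first-faulting pattern: reset the FFR before a first-faulting load,
+ // then read it back to see which lanes completed. CreateTrueMaskByte and
+ // GetFfrByte are assumed helpers from the same proposal.
+ private static Vector<byte> Example_ResetAndReadFfr()
+ {
+     Sve.SetFfr(Sve.CreateTrueMaskByte());   // WRFFR: mark every element as not-yet-faulted
+     // ... a first-faulting load (LDFF1*) would go here ...
+     return Sve.GetFfrByte();                // RDFFR: lanes still set loaded successfully
+ }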
+
+
+ /// ShiftLeftLogical : Logical shift left
+
+ /// <summary>
+ /// svint8_t svlsl[_s8]_m(svbool_t pg, svint8_t op1, svuint8_t op2)
+ ///   LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ ///   MOVPRFX Zresult, Zop1; LSL Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// svint8_t svlsl[_s8]_x(svbool_t pg, svint8_t op1, svuint8_t op2)
+ ///   LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ ///   LSLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+ ///   MOVPRFX Zresult, Zop1; LSL Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// svint8_t svlsl[_s8]_z(svbool_t pg, svint8_t op1, svuint8_t op2)
+ ///   MOVPRFX Zresult.B, Pg/Z, Zop1.B; LSL Zresult.B, Pg/M, Zresult.B, Zop2.B
+ ///   MOVPRFX Zresult.B, Pg/Z, Zop2.B; LSLR Zresult.B, Pg/M, Zresult.B, Zop1.B
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_AM_2A : left shifts   LSL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V1, REG_P0, 0, INS_OPTS_SCALABLE_B);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V3, REG_P1, 1, INS_OPTS_SCALABLE_B);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V5, REG_P2, 7, INS_OPTS_SCALABLE_B);
+ ///    IF_SVE_AN_3A   LSL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ ///        theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_P2, REG_V0, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_AO_3A   LSL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.D
+ ///        theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_P7, REG_V3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE);
+ ///    IF_SVE_BG_3A   LSL <Zd>.<T>, <Zn>.<T>, <Zm>.D
+ ///        theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_V0, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED_WIDE);
+ ///    IF_SVE_BF_2A   LSL <Zd>.<T>, <Zn>.<T>, #<const>
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///        theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<sbyte> ShiftLeftLogical(Vector<sbyte> left, Vector<byte> right) => ShiftLeftLogical(left, right);
+
+ /// <summary>
+ /// svint8_t svlsl_wide[_s8]_m(svbool_t pg, svint8_t op1, svuint64_t op2)
+ ///   LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.D
+ ///   MOVPRFX Zresult, Zop1; LSL Zresult.B, Pg/M, Zresult.B, Zop2.D
+ /// svint8_t svlsl_wide[_s8]_x(svbool_t pg, svint8_t op1, svuint64_t op2)
+ ///   LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.D
+ ///   LSL Zresult.B, Zop1.B, Zop2.D
+ /// svint8_t svlsl_wide[_s8]_z(svbool_t pg, svint8_t op1, svuint64_t op2)
+ ///   MOVPRFX Zresult.B, Pg/Z, Zop1.B; LSL Zresult.B, Pg/M, Zresult.B, Zop2.D
+ ///
+ /// codegenarm64test: same LSL cases as the first ShiftLeftLogical overload above.
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<sbyte> ShiftLeftLogical(Vector<sbyte> left, Vector<ulong> right) => ShiftLeftLogical(left, right);
+
+ /// <summary>
+ /// svint16_t svlsl[_s16]_m(svbool_t pg, svint16_t op1, svuint16_t op2)
+ ///   LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ ///   MOVPRFX Zresult, Zop1; LSL Zresult.H, Pg/M, Zresult.H, Zop2.H
+ /// svint16_t svlsl[_s16]_x(svbool_t pg, svint16_t op1, svuint16_t op2)
+ ///   LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ ///   LSLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+ ///   MOVPRFX Zresult, Zop1; LSL Zresult.H, Pg/M, Zresult.H, Zop2.H
+ /// svint16_t svlsl[_s16]_z(svbool_t pg, svint16_t op1, svuint16_t op2)
+ ///   MOVPRFX Zresult.H, Pg/Z, Zop1.H; LSL Zresult.H, Pg/M, Zresult.H, Zop2.H
+ ///   MOVPRFX Zresult.H, Pg/Z, Zop2.H; LSLR Zresult.H, Pg/M, Zresult.H, Zop1.H
+ ///
+ /// codegenarm64test: same LSL cases as the first ShiftLeftLogical overload above.
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<short> ShiftLeftLogical(Vector<short> left, Vector<ushort> right) => ShiftLeftLogical(left, right);
+
+ /// <summary>
+ /// svint16_t svlsl_wide[_s16]_m(svbool_t pg, svint16_t op1, svuint64_t op2)
+ ///   LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.D
+ ///   MOVPRFX Zresult, Zop1; LSL Zresult.H, Pg/M, Zresult.H, Zop2.D
+ /// svint16_t svlsl_wide[_s16]_x(svbool_t pg, svint16_t op1, svuint64_t op2)
+ ///   LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.D
+ ///   LSL Zresult.H, Zop1.H, Zop2.D
+ /// svint16_t svlsl_wide[_s16]_z(svbool_t pg, svint16_t op1, svuint64_t op2)
+ ///   MOVPRFX Zresult.H, Pg/Z, Zop1.H; LSL Zresult.H, Pg/M, Zresult.H, Zop2.D
+ ///
+ /// codegenarm64test: same LSL cases as the first ShiftLeftLogical overload above.
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<short> ShiftLeftLogical(Vector<short> left, Vector<ulong> right) => ShiftLeftLogical(left, right);
+
+ /// <summary>
+ /// svint32_t svlsl[_s32]_m(svbool_t pg, svint32_t op1, svuint32_t op2)
+ ///   LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ ///   MOVPRFX Zresult, Zop1; LSL Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// svint32_t svlsl[_s32]_x(svbool_t pg, svint32_t op1, svuint32_t op2)
+ ///   LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ ///   LSLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+ ///   MOVPRFX Zresult, Zop1; LSL Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// svint32_t svlsl[_s32]_z(svbool_t pg, svint32_t op1, svuint32_t op2)
+ ///   MOVPRFX Zresult.S, Pg/Z, Zop1.S; LSL Zresult.S, Pg/M, Zresult.S, Zop2.S
+ ///   MOVPRFX Zresult.S, Pg/Z, Zop2.S; LSLR Zresult.S, Pg/M, Zresult.S, Zop1.S
+ ///
+ /// codegenarm64test: same LSL cases as the first ShiftLeftLogical overload above.
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<int> ShiftLeftLogical(Vector<int> left, Vector<uint> right) => ShiftLeftLogical(left, right);
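+
+ // --- Example (editor's addition; hypothetical, not part of the generated API) ---
+ // Same-width form: each 32-bit lane of 'values' is shifted left by the matching
+ // lane of 'amounts' (LSL Z.S, Pg/M, Z.S, Z.S with an implicit all-true predicate).
+ private static Vector<int> Example_ShiftEachLane(Vector<int> values, Vector<uint> amounts)
+ {
+     return Sve.ShiftLeftLogical(values, amounts);
+ }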
Pg/M, Ztied1.S, Zop2.D + /// MOVPRFX Zresult, Zop1; LSL Zresult.S, Pg/M, Zresult.S, Zop2.D + /// svint32_t svlsl_wide[_s32]_x(svbool_t pg, svint32_t op1, svuint64_t op2) + /// LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.D + /// LSL Zresult.S, Zop1.S, Zop2.D + /// svint32_t svlsl_wide[_s32]_z(svbool_t pg, svint32_t op1, svuint64_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; LSL Zresult.S, Pg/M, Zresult.S, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts LSL ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V1, REG_P0, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V3, REG_P1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V5, REG_P2, 7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A LSL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_P2, REG_V0, INS_OPTS_SCALABLE_H); + /// IF_SVE_AO_3A LSL ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_P7, REG_V3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A LSL ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_V0, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A LSL ., ., #emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// 
+ /// svint64_t svlsl[_s64]_m(svbool_t pg, svint64_t op1, svuint64_t op2) + /// LSL Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; LSL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svlsl[_s64]_x(svbool_t pg, svint64_t op1, svuint64_t op2) + /// LSL Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// LSLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; LSL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svlsl[_s64]_z(svbool_t pg, svint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; LSL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; LSLR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts LSL ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V1, REG_P0, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V3, REG_P1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V5, REG_P2, 7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A LSL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_P2, REG_V0, INS_OPTS_SCALABLE_H); + /// IF_SVE_AO_3A LSL ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_P7, REG_V3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A LSL ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_V0, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A LSL ., ., #emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, 
EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svuint8_t svlsl[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; LSL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svlsl[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// LSLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; LSL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svlsl[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; LSL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; LSLR Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts LSL ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V1, REG_P0, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V3, REG_P1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V5, REG_P2, 7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A LSL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_P2, REG_V0, INS_OPTS_SCALABLE_H); + /// IF_SVE_AO_3A LSL ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_P7, REG_V3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A LSL ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_V0, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A LSL ., ., #emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 
63, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svuint8_t svlsl_wide[_u8]_m(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.D + /// MOVPRFX Zresult, Zop1; LSL Zresult.B, Pg/M, Zresult.B, Zop2.D + /// svuint8_t svlsl_wide[_u8]_x(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// LSL Ztied1.B, Pg/M, Ztied1.B, Zop2.D + /// LSL Zresult.B, Zop1.B, Zop2.D + /// svuint8_t svlsl_wide[_u8]_z(svbool_t pg, svuint8_t op1, svuint64_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; LSL Zresult.B, Pg/M, Zresult.B, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts LSL ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V1, REG_P0, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V3, REG_P1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V5, REG_P2, 7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A LSL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_P2, REG_V0, INS_OPTS_SCALABLE_H); + /// IF_SVE_AO_3A LSL ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_P7, REG_V3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A LSL ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_V0, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A LSL ., ., #emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D, 
INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svuint16_t svlsl[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; LSL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svlsl[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// LSLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; LSL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svlsl[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; LSL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; LSLR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts LSL ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V1, REG_P0, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V3, REG_P1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V5, REG_P2, 7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A LSL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_P2, REG_V0, INS_OPTS_SCALABLE_H); + /// IF_SVE_AO_3A LSL ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_P7, REG_V3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A LSL ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_V0, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A LSL ., ., #emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, 
INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svuint16_t svlsl_wide[_u16]_m(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.D + /// MOVPRFX Zresult, Zop1; LSL Zresult.H, Pg/M, Zresult.H, Zop2.D + /// svuint16_t svlsl_wide[_u16]_x(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// LSL Ztied1.H, Pg/M, Ztied1.H, Zop2.D + /// LSL Zresult.H, Zop1.H, Zop2.D + /// svuint16_t svlsl_wide[_u16]_z(svbool_t pg, svuint16_t op1, svuint64_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; LSL Zresult.H, Pg/M, Zresult.H, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts LSL ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V1, REG_P0, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V3, REG_P1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V5, REG_P2, 7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A LSL ., /M, ., . 
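A note on the _wide overloads interleaved above: op2 is always a vector of 64-bit lanes, and each 64-bit lane supplies the shift count for every narrower element that shares that lane. A minimal sketch of how that is expected to surface in C#, assuming the proposed System.Runtime.Intrinsics.Arm.Sve class and the ShiftLeftLogical overloads listed in this file (the surface is still in preview, so names and shapes may change):

    using System;
    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static class LslWideSketch
    {
        // Shifts every byte element left by two via the svlsl_wide form:
        // each ulong lane value is the count for all eight bytes occupying
        // that 64-bit portion of the vector.
        static Vector<byte> ShiftAllBytesLeftByTwo(Vector<byte> values)
        {
            if (!Sve.IsSupported)
            {
                throw new PlatformNotSupportedException();
            }

            Vector<ulong> counts = new Vector<ulong>(2UL);
            return Sve.ShiftLeftLogical(values, counts);
        }
    }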
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_P2, REG_V0, INS_OPTS_SCALABLE_H); + /// IF_SVE_AO_3A LSL ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_P7, REG_V3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A LSL ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_V0, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A LSL ., ., #emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svuint32_t svlsl[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; LSL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svlsl[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// LSLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; LSL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svlsl[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; LSL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; LSLR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts LSL ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V1, REG_P0, 0, 
INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V3, REG_P1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V5, REG_P2, 7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A LSL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_P2, REG_V0, INS_OPTS_SCALABLE_H); + /// IF_SVE_AO_3A LSL ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_P7, REG_V3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A LSL ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_V0, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A LSL ., ., #emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svuint32_t svlsl_wide[_u32]_m(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.D + /// MOVPRFX Zresult, Zop1; LSL Zresult.S, Pg/M, Zresult.S, Zop2.D + /// svuint32_t svlsl_wide[_u32]_x(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// LSL Ztied1.S, Pg/M, Ztied1.S, Zop2.D + /// LSL Zresult.S, Zop1.S, Zop2.D + /// svuint32_t svlsl_wide[_u32]_z(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; LSL Zresult.S, Pg/M, Zresult.S, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : 
left shifts LSL ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V1, REG_P0, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V3, REG_P1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V5, REG_P2, 7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A LSL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_P2, REG_V0, INS_OPTS_SCALABLE_H); + /// IF_SVE_AO_3A LSL ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_P7, REG_V3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A LSL ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_V0, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A LSL ., ., #emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + /// + /// svuint64_t svlsl[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// LSL Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; LSL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svlsl[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// LSL Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// LSLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; LSL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t 
svlsl[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; LSL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; LSLR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts LSL ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V1, REG_P0, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V3, REG_P1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V5, REG_P2, 7, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A LSL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_P2, REG_V0, INS_OPTS_SCALABLE_H); + /// IF_SVE_AO_3A LSL ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_P7, REG_V3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A LSL ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsl, EA_SCALABLE, REG_V19, REG_V0, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A LSL ., ., #emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsl, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right) => ShiftLeftLogical(left, right); + + + /// ShiftRightArithmetic : Arithmetic shift right + + /// + /// svint8_t svasr[_s8]_m(svbool_t pg, svint8_t op1, svuint8_t op2) + /// ASR Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX 
Zresult, Zop1; ASR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svasr[_s8]_x(svbool_t pg, svint8_t op1, svuint8_t op2) + /// ASR Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// ASRR Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; ASR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svasr[_s8]_z(svbool_t pg, svint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; ASR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; ASRR Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : right shifts ASR ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_P0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V1, REG_P1, 2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V2, REG_P2, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V3, REG_P3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V4, REG_P7, 8, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A ASR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V5, REG_P0, REG_V21, INS_OPTS_SCALABLE_S); + /// IF_SVE_AO_3A ASR ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V4, REG_P3, REG_V24, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A ASR ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V9, REG_V31, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A ASR ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D, 
INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) => ShiftRightArithmetic(left, right); + + /// + /// svint8_t svasr_wide[_s8]_m(svbool_t pg, svint8_t op1, svuint64_t op2) + /// ASR Ztied1.B, Pg/M, Ztied1.B, Zop2.D + /// MOVPRFX Zresult, Zop1; ASR Zresult.B, Pg/M, Zresult.B, Zop2.D + /// svint8_t svasr_wide[_s8]_x(svbool_t pg, svint8_t op1, svuint64_t op2) + /// ASR Ztied1.B, Pg/M, Ztied1.B, Zop2.D + /// ASR Zresult.B, Zop1.B, Zop2.D + /// svint8_t svasr_wide[_s8]_z(svbool_t pg, svint8_t op1, svuint64_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; ASR Zresult.B, Pg/M, Zresult.B, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : right shifts ASR ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_P0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V1, REG_P1, 2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V2, REG_P2, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V3, REG_P3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V4, REG_P7, 8, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A ASR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V5, REG_P0, REG_V21, INS_OPTS_SCALABLE_S); + /// IF_SVE_AO_3A ASR ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V4, REG_P3, REG_V24, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A ASR ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V9, REG_V31, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A ASR ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, 
EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) => ShiftRightArithmetic(left, right); + + /// + /// svint16_t svasr[_s16]_m(svbool_t pg, svint16_t op1, svuint16_t op2) + /// ASR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; ASR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svasr[_s16]_x(svbool_t pg, svint16_t op1, svuint16_t op2) + /// ASR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// ASRR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; ASR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svasr[_s16]_z(svbool_t pg, svint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; ASR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; ASRR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : right shifts ASR ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_P0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V1, REG_P1, 2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V2, REG_P2, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V3, REG_P3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V4, REG_P7, 8, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A ASR ., /M, ., . 
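Because every svasr form takes an unsigned count vector, the signedness lives entirely in op1: the shift replicates the sign bit, so negative lanes stay negative. A small sketch against the Vector&lt;short&gt; overload documented in the surrounding block, under the same preview-API assumption as the other sketches in this file:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static class AsrSketch
    {
        static Vector<short> Demo()
        {
            if (!Sve.IsSupported) return default;

            Vector<short> values = new Vector<short>((short)(-16));
            Vector<ushort> counts = new Vector<ushort>((ushort)2);

            // Arithmetic shift keeps the sign: every lane becomes -4, where a
            // logical shift of the same bits would give a large positive value.
            return Sve.ShiftRightArithmetic(values, counts);
        }
    }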
+ /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V5, REG_P0, REG_V21, INS_OPTS_SCALABLE_S); + /// IF_SVE_AO_3A ASR ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V4, REG_P3, REG_V24, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A ASR ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V9, REG_V31, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A ASR ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) => ShiftRightArithmetic(left, right); + + /// + /// svint16_t svasr_wide[_s16]_m(svbool_t pg, svint16_t op1, svuint64_t op2) + /// ASR Ztied1.H, Pg/M, Ztied1.H, Zop2.D + /// MOVPRFX Zresult, Zop1; ASR Zresult.H, Pg/M, Zresult.H, Zop2.D + /// svint16_t svasr_wide[_s16]_x(svbool_t pg, svint16_t op1, svuint64_t op2) + /// ASR Ztied1.H, Pg/M, Ztied1.H, Zop2.D + /// ASR Zresult.H, Zop1.H, Zop2.D + /// svint16_t svasr_wide[_s16]_z(svbool_t pg, svint16_t op1, svuint64_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; ASR Zresult.H, Pg/M, Zresult.H, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : right shifts ASR ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, 
REG_P0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V1, REG_P1, 2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V2, REG_P2, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V3, REG_P3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V4, REG_P7, 8, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A ASR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V5, REG_P0, REG_V21, INS_OPTS_SCALABLE_S); + /// IF_SVE_AO_3A ASR ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V4, REG_P3, REG_V24, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A ASR ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V9, REG_V31, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A ASR ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) => ShiftRightArithmetic(left, right); + + /// + /// svint32_t svasr[_s32]_m(svbool_t pg, svint32_t op1, svuint32_t op2) + /// ASR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; ASR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// 
svint32_t svasr[_s32]_x(svbool_t pg, svint32_t op1, svuint32_t op2) + /// ASR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// ASRR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; ASR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svasr[_s32]_z(svbool_t pg, svint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; ASR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; ASRR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : right shifts ASR ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_P0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V1, REG_P1, 2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V2, REG_P2, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V3, REG_P3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V4, REG_P7, 8, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A ASR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V5, REG_P0, REG_V21, INS_OPTS_SCALABLE_S); + /// IF_SVE_AO_3A ASR ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V4, REG_P3, REG_V24, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A ASR ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V9, REG_V31, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A ASR ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// 
theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) => ShiftRightArithmetic(left, right); + + /// + /// svint32_t svasr_wide[_s32]_m(svbool_t pg, svint32_t op1, svuint64_t op2) + /// ASR Ztied1.S, Pg/M, Ztied1.S, Zop2.D + /// MOVPRFX Zresult, Zop1; ASR Zresult.S, Pg/M, Zresult.S, Zop2.D + /// svint32_t svasr_wide[_s32]_x(svbool_t pg, svint32_t op1, svuint64_t op2) + /// ASR Ztied1.S, Pg/M, Ztied1.S, Zop2.D + /// ASR Zresult.S, Zop1.S, Zop2.D + /// svint32_t svasr_wide[_s32]_z(svbool_t pg, svint32_t op1, svuint64_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; ASR Zresult.S, Pg/M, Zresult.S, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : right shifts ASR ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_P0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V1, REG_P1, 2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V2, REG_P2, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V3, REG_P3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V4, REG_P7, 8, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A ASR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V5, REG_P0, REG_V21, INS_OPTS_SCALABLE_S); + /// IF_SVE_AO_3A ASR ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V4, REG_P3, REG_V24, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A ASR ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V9, REG_V31, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A ASR ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 15, 
INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right) => ShiftRightArithmetic(left, right); + + /// + /// svint64_t svasr[_s64]_m(svbool_t pg, svint64_t op1, svuint64_t op2) + /// ASR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; ASR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svasr[_s64]_x(svbool_t pg, svint64_t op1, svuint64_t op2) + /// ASR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// ASRR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; ASR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svasr[_s64]_z(svbool_t pg, svint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; ASR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; ASRR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : right shifts ASR ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_P0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V1, REG_P1, 2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V2, REG_P2, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V3, REG_P3, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V4, REG_P7, 8, INS_OPTS_SCALABLE_B); + /// IF_SVE_AN_3A ASR ., /M, ., . 
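Each block above lists three ACLE predication forms (_m, _x and _z), yet the generator emits a single managed overload tagged "Embedded arg1 mask predicate". When merging semantics are wanted explicitly, the natural spelling is a conditional select around the shift; a sketch, assuming Sve.ConditionalSelect lands as proposed elsewhere in this API set:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static class PredicationSketch
    {
        // Approximates svasr[_s32]_m: lanes with a non-zero mask value are
        // shifted, all other lanes keep their original contents.
        static Vector<int> ShiftActiveLanes(Vector<int> mask, Vector<int> values, Vector<uint> counts)
        {
            Vector<int> shifted = Sve.ShiftRightArithmetic(values, counts);
            return Sve.ConditionalSelect(mask, shifted, values);
        }
    }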
+ /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V5, REG_P0, REG_V21, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_AO_3A ASR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.D
+ /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V4, REG_P3, REG_V24, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WIDE);
+ /// IF_SVE_BG_3A ASR <Zd>.<T>, <Zn>.<T>, <Zm>.D
+ /// theEmitter->emitIns_R_R_R(INS_sve_asr, EA_SCALABLE, REG_V9, REG_V31, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED_WIDE);
+ /// IF_SVE_BF_2A ASR <Zd>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector<long> ShiftRightArithmetic(Vector<long> left, Vector<ulong> right) => ShiftRightArithmetic(left, right);
+
+
+ /// ShiftRightArithmeticForDivide : Arithmetic shift right for divide by immediate
+
+ ///
+ /// svint8_t svasrd[_n_s8]_m(svbool_t pg, svint8_t op1, uint64_t imm2)
+ /// ASRD Ztied1.B, Pg/M, Ztied1.B, #imm2
+ /// MOVPRFX Zresult, Zop1; ASRD Zresult.B, Pg/M, Zresult.B, #imm2
+ /// svint8_t svasrd[_n_s8]_x(svbool_t pg, svint8_t op1, uint64_t imm2)
+ /// ASRD Ztied1.B, Pg/M, Ztied1.B, #imm2
+ /// MOVPRFX Zresult, Zop1; ASRD Zresult.B, Pg/M, Zresult.B, #imm2
+ /// svint8_t svasrd[_n_s8]_z(svbool_t pg, svint8_t op1, uint64_t imm2)
+ /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; ASRD Zresult.B, Pg/M, Zresult.B, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts ASRD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V5, REG_P0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V6, REG_P4, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V7, REG_P7, 15, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V31, REG_P6, 16, INS_OPTS_SCALABLE_H);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector<sbyte> ShiftRightArithmeticForDivide(Vector<sbyte> value, [ConstantExpected] byte control) => ShiftRightArithmeticForDivide(value, control);
+
+ ///
+ /// svint16_t svasrd[_n_s16]_m(svbool_t pg, svint16_t op1, uint64_t imm2)
+ /// ASRD Ztied1.H, Pg/M, Ztied1.H, #imm2
+ /// MOVPRFX Zresult, Zop1; ASRD Zresult.H, Pg/M, Zresult.H, #imm2
+ /// svint16_t svasrd[_n_s16]_x(svbool_t pg, svint16_t op1, uint64_t imm2)
+ /// ASRD Ztied1.H, Pg/M, Ztied1.H, #imm2
+ /// MOVPRFX Zresult, Zop1; ASRD Zresult.H, Pg/M, Zresult.H, #imm2
+ /// svint16_t svasrd[_n_s16]_z(svbool_t pg, svint16_t op1, uint64_t imm2)
+ /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; ASRD Zresult.H, Pg/M, Zresult.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts ASRD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V5, REG_P0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V6, REG_P4, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V7, REG_P7, 15, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V31, REG_P6, 16, INS_OPTS_SCALABLE_H);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector<short> ShiftRightArithmeticForDivide(Vector<short> value, [ConstantExpected] byte control) => ShiftRightArithmeticForDivide(value, control);
+
+ ///
+ /// svint32_t svasrd[_n_s32]_m(svbool_t pg, svint32_t op1, uint64_t imm2)
+ /// ASRD Ztied1.S, Pg/M, Ztied1.S, #imm2
+ /// MOVPRFX Zresult, Zop1; ASRD Zresult.S, Pg/M, Zresult.S, #imm2
+ /// svint32_t svasrd[_n_s32]_x(svbool_t pg, svint32_t op1, uint64_t imm2)
+ /// ASRD Ztied1.S, Pg/M, Ztied1.S, #imm2
+ /// MOVPRFX Zresult, Zop1; ASRD Zresult.S, Pg/M, Zresult.S, #imm2
+ /// svint32_t svasrd[_n_s32]_z(svbool_t pg, svint32_t op1, uint64_t imm2)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; ASRD Zresult.S, Pg/M, Zresult.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts ASRD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V5, REG_P0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V6, REG_P4, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V7, REG_P7, 15, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V31, REG_P6, 16, INS_OPTS_SCALABLE_H);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector<int> ShiftRightArithmeticForDivide(Vector<int> value, [ConstantExpected] byte control) => ShiftRightArithmeticForDivide(value, control);
+
+ ///
+ /// svint64_t svasrd[_n_s64]_m(svbool_t pg, svint64_t op1, uint64_t imm2)
+ /// ASRD Ztied1.D, Pg/M, Ztied1.D, #imm2
+ /// MOVPRFX Zresult, Zop1; ASRD Zresult.D, Pg/M, Zresult.D, #imm2
+ /// svint64_t svasrd[_n_s64]_x(svbool_t pg, svint64_t op1, uint64_t imm2)
+ /// ASRD Ztied1.D, Pg/M, Ztied1.D, #imm2
+ /// MOVPRFX Zresult, Zop1; ASRD Zresult.D, Pg/M, Zresult.D, #imm2
+ /// svint64_t svasrd[_n_s64]_z(svbool_t pg, svint64_t op1, uint64_t imm2)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; ASRD Zresult.D, Pg/M, Zresult.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts ASRD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V5, REG_P0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V6, REG_P4, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V7, REG_P7, 15, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_asrd, EA_SCALABLE, REG_V31, REG_P6, 16, INS_OPTS_SCALABLE_H);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector<long> ShiftRightArithmeticForDivide(Vector<long> value, [ConstantExpected] byte control) => ShiftRightArithmeticForDivide(value, control);
+
+
+ /// ShiftRightLogical : Logical shift right
+
+ ///
+ /// svuint8_t svlsr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// LSR Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ /// MOVPRFX Zresult, Zop1; LSR Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// svuint8_t svlsr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// LSR Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ /// LSRR Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+ /// MOVPRFX Zresult, Zop1; LSR Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// svuint8_t svlsr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; LSR Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; LSRR Zresult.B, Pg/M, Zresult.B, Zop1.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts LSR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V8, REG_P5, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V9, REG_P6, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_P7, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, 31, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V5, REG_P1, 32, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_AN_3A LSR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
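The ShiftRightArithmeticForDivide blocks above are the one place where the shift is not a plain truncation: ASRD divides by a power of two rounding toward zero, which matches C# integer division, while an ordinary arithmetic shift rounds toward negative infinity (for example, -5 >> 1 is -3, but -5 / 2 is -2). A sketch of the intended use, again assuming the preview Sve surface:

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static class AsrdSketch
    {
        // Element-wise values / 8 for signed input, negative lanes included:
        // rounding is toward zero, unlike ShiftRightArithmetic by 3.
        static Vector<int> DivideByEight(Vector<int> values) =>
            Sve.ShiftRightArithmeticForDivide(values, 3);
    }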
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V5, REG_P5, REG_V6, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_AO_3A LSR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.D
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE);
+ /// IF_SVE_BG_3A LSR <Zd>.<T>, <Zn>.<T>, <Zm>.D
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V29, REG_V10, REG_V22, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED_WIDE);
+ /// IF_SVE_BF_2A LSR <Zd>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector<byte> ShiftRightLogical(Vector<byte> left, Vector<byte> right) => ShiftRightLogical(left, right);
+
+ ///
+ /// svuint8_t svlsr_wide[_u8]_m(svbool_t pg, svuint8_t op1, svuint64_t op2)
+ /// LSR Ztied1.B, Pg/M, Ztied1.B, Zop2.D
+ /// MOVPRFX Zresult, Zop1; LSR Zresult.B, Pg/M, Zresult.B, Zop2.D
+ /// svuint8_t svlsr_wide[_u8]_x(svbool_t pg, svuint8_t op1, svuint64_t op2)
+ /// LSR Ztied1.B, Pg/M, Ztied1.B, Zop2.D
+ /// LSR Zresult.B, Zop1.B, Zop2.D
+ /// svuint8_t svlsr_wide[_u8]_z(svbool_t pg, svuint8_t op1, svuint64_t op2)
+ /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; LSR Zresult.B, Pg/M, Zresult.B, Zop2.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts LSR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V8, REG_P5, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V9, REG_P6, 5, INS_OPTS_SCALABLE_S);
+ /// 
theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_P7, 17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V5, REG_P1, 32, INS_OPTS_SCALABLE_S); + /// IF_SVE_AN_3A LSR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V5, REG_P5, REG_V6, INS_OPTS_SCALABLE_B); + /// IF_SVE_AO_3A LSR ., /M, ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE); + /// IF_SVE_BG_3A LSR ., ., .D + /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V29, REG_V10, REG_V22, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED_WIDE); + /// IF_SVE_BF_2A LSR ., ., #emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) => ShiftRightLogical(left, right); + + /// + /// svuint16_t svlsr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// LSR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; LSR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svlsr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// LSR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// LSRR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; LSR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svlsr[_u16]_z(svbool_t pg, svuint16_t op1, 
+ /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; LSR Zresult.H, Pg/M, Zresult.H, Zop2.H
+ /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; LSRR Zresult.H, Pg/M, Zresult.H, Zop1.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts LSR ., /M, ., #
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V8, REG_P5, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V9, REG_P6, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_P7, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, 31, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V5, REG_P1, 32, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_AN_3A LSR ., /M, ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V5, REG_P5, REG_V6, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_AO_3A LSR ., /M, ., .D
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE);
+ /// IF_SVE_BG_3A LSR ., ., .D
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V29, REG_V10, REG_V22, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED_WIDE);
+ /// IF_SVE_BF_2A LSR ., ., #
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector ShiftRightLogical(Vector left, Vector right) => ShiftRightLogical(left, right);
+
+ ///
+ /// svuint16_t svlsr_wide[_u16]_m(svbool_t pg, svuint16_t op1, svuint64_t op2)
+ /// LSR Ztied1.H, Pg/M, Ztied1.H, Zop2.D
+ /// MOVPRFX Zresult, Zop1; LSR Zresult.H, Pg/M, Zresult.H, Zop2.D
+ /// svuint16_t svlsr_wide[_u16]_x(svbool_t pg, svuint16_t op1, svuint64_t op2)
+ /// LSR Ztied1.H, Pg/M, Ztied1.H, Zop2.D
+ /// LSR Zresult.H, Zop1.H, Zop2.D
+ /// svuint16_t svlsr_wide[_u16]_z(svbool_t pg, svuint16_t op1, svuint64_t op2)
+ /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; LSR Zresult.H, Pg/M, Zresult.H, Zop2.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts LSR ., /M, ., #
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V8, REG_P5, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V9, REG_P6, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_P7, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, 31, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V5, REG_P1, 32, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_AN_3A LSR ., /M, ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V5, REG_P5, REG_V6, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_AO_3A LSR ., /M, ., .D
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE);
+ /// IF_SVE_BG_3A LSR ., ., .D
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V29, REG_V10, REG_V22, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED_WIDE);
+ /// IF_SVE_BF_2A LSR ., ., #
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector ShiftRightLogical(Vector left, Vector right) => ShiftRightLogical(left, right);
+
+ ///
+ /// svuint32_t svlsr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// LSR Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ /// MOVPRFX Zresult, Zop1; LSR Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// svuint32_t svlsr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// LSR Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ /// LSRR Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+ /// MOVPRFX Zresult, Zop1; LSR Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// svuint32_t svlsr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; LSR Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; LSRR Zresult.S, Pg/M, Zresult.S, Zop1.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts LSR ., /M, ., #
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V8, REG_P5, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V9, REG_P6, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_P7, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, 31, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V5, REG_P1, 32, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_AN_3A LSR ., /M, ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V5, REG_P5, REG_V6, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_AO_3A LSR ., /M, ., .D
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE);
+ /// IF_SVE_BG_3A LSR ., ., .D
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V29, REG_V10, REG_V22, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED_WIDE);
+ /// IF_SVE_BF_2A LSR ., ., #
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 16,
INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftRightLogical(Vector left, Vector right) => ShiftRightLogical(left, right); + + /// + /// svuint32_t svlsr_wide[_u32]_m(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// LSR Ztied1.S, Pg/M, Ztied1.S, Zop2.D + /// MOVPRFX Zresult, Zop1; LSR Zresult.S, Pg/M, Zresult.S, Zop2.D + /// svuint32_t svlsr_wide[_u32]_x(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// LSR Ztied1.S, Pg/M, Ztied1.S, Zop2.D + /// LSR Zresult.S, Zop1.S, Zop2.D + /// svuint32_t svlsr_wide[_u32]_z(svbool_t pg, svuint32_t op1, svuint64_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; LSR Zresult.S, Pg/M, Zresult.S, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : right shifts LSR ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V8, REG_P5, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V9, REG_P6, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_P7, 17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V5, REG_P1, 32, INS_OPTS_SCALABLE_S); + /// IF_SVE_AN_3A LSR ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V5, REG_P5, REG_V6, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_AO_3A LSR ., /M, ., .D
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE);
+ /// IF_SVE_BG_3A LSR ., ., .D
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V29, REG_V10, REG_V22, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED_WIDE);
+ /// IF_SVE_BF_2A LSR ., ., #
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector ShiftRightLogical(Vector left, Vector right) => ShiftRightLogical(left, right);
+
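+ // Editorial sketch, not generated output: a scalar model of the "wide" logical right
+ // shift used by the byte overload above. Each byte lane is shifted by the 64-bit
+ // element of `right` that covers it; shift amounts of 8 or more are assumed to
+ // produce zero, matching logical-shift semantics. The helper name is hypothetical.
+ private static byte[] ShiftRightLogicalWideModel(byte[] left, ulong[] right)
+ {
+     var result = new byte[left.Length];
+     for (int i = 0; i < left.Length; i++)
+     {
+         ulong shift = right[i / 8]; // eight byte lanes share one 64-bit shift element
+         result[i] = shift >= 8 ? (byte)0 : (byte)(left[i] >> (int)shift);
+     }
+     return result;
+ }
+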
+ ///
+ /// svuint64_t svlsr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// LSR Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ /// MOVPRFX Zresult, Zop1; LSR Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// svuint64_t svlsr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// LSR Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ /// LSRR Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+ /// MOVPRFX Zresult, Zop1; LSR Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// svuint64_t svlsr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; LSR Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; LSRR Zresult.D, Pg/M, Zresult.D, Zop1.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts LSR ., /M, ., #
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V8, REG_P5, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V9, REG_P6, 5, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_P7, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, 31, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V5, REG_P1, 32, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_AN_3A LSR ., /M, ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V5, REG_P5, REG_V6, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_AO_3A LSR ., /M, ., .D
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_P0, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WIDE);
+ /// IF_SVE_BG_3A LSR ., ., .D
+ /// theEmitter->emitIns_R_R_R(INS_sve_lsr, EA_SCALABLE, REG_V29, REG_V10, REG_V22, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED_WIDE);
+ /// IF_SVE_BF_2A LSR ., ., #
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 5, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 9, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 15, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 33, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_I(INS_sve_lsr, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector ShiftRightLogical(Vector left, Vector right) => ShiftRightLogical(left, right);
+
+
+ /// SignExtend16 : Sign-extend the low 16 bits
+
+ ///
+ /// svint32_t svexth[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ /// SXTH Ztied.S, Pg/M, Zop.S
+ /// MOVPRFX Zresult, Zinactive; SXTH Zresult.S, Pg/M, Zop.S
+ /// svint32_t svexth[_s32]_x(svbool_t pg, svint32_t op)
+ /// SXTH Ztied.S, Pg/M, Ztied.S
+ /// MOVPRFX Zresult, Zop; SXTH
Zresult.S, Pg/M, Zop.S + /// svint32_t svexth[_s32]_z(svbool_t pg, svint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; SXTH Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A SXTH ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_sxth, EA_SCALABLE, REG_V21, REG_P2, REG_V10, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sxth, EA_SCALABLE, REG_V21, REG_P2, REG_V10, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SignExtend16(Vector value) => SignExtend16(value); + + /// + /// svint64_t svexth[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// SXTH Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; SXTH Zresult.D, Pg/M, Zop.D + /// svint64_t svexth[_s64]_x(svbool_t pg, svint64_t op) + /// SXTH Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; SXTH Zresult.D, Pg/M, Zop.D + /// svint64_t svexth[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; SXTH Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A SXTH ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_sxth, EA_SCALABLE, REG_V21, REG_P2, REG_V10, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sxth, EA_SCALABLE, REG_V21, REG_P2, REG_V10, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SignExtend16(Vector value) => SignExtend16(value); + + + /// SignExtend32 : Sign-extend the low 32 bits + + /// + /// svint64_t svextw[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// SXTW Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; SXTW Zresult.D, Pg/M, Zop.D + /// svint64_t svextw[_s64]_x(svbool_t pg, svint64_t op) + /// SXTW Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; SXTW Zresult.D, Pg/M, Zop.D + /// svint64_t svextw[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; SXTW Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A SXTW .D, /M, .D + /// theEmitter->emitIns_R_R_R(INS_sve_sxtw, EA_SCALABLE, REG_V20, REG_P3, REG_V11, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SignExtend32(Vector value) => SignExtend32(value); + + + /// SignExtend8 : Sign-extend the low 8 bits + + /// + /// svint16_t svextb[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// SXTB Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; SXTB Zresult.H, Pg/M, Zop.H + /// svint16_t svextb[_s16]_x(svbool_t pg, svint16_t op) + /// SXTB Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; SXTB Zresult.H, Pg/M, Zop.H + /// svint16_t svextb[_s16]_z(svbool_t pg, svint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; SXTB Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A SXTB ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sxtb, EA_SCALABLE, REG_V22, REG_P1, REG_V9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_sxtb, EA_SCALABLE, REG_V22, REG_P1, REG_V9, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sxtb, EA_SCALABLE, REG_V22, REG_P1, REG_V9, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SignExtend8(Vector value) => SignExtend8(value); + + /// + /// svint32_t svextb[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// SXTB Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; SXTB Zresult.S, Pg/M, Zop.S + /// svint32_t svextb[_s32]_x(svbool_t pg, svint32_t op) + /// SXTB Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; SXTB Zresult.S, Pg/M, Zop.S + /// svint32_t svextb[_s32]_z(svbool_t pg, svint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; SXTB Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A SXTB ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_sxtb, EA_SCALABLE, REG_V22, REG_P1, REG_V9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_sxtb, EA_SCALABLE, REG_V22, REG_P1, REG_V9, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sxtb, EA_SCALABLE, REG_V22, REG_P1, REG_V9, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SignExtend8(Vector value) => SignExtend8(value); + + /// + /// svint64_t svextb[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// SXTB Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; SXTB Zresult.D, Pg/M, Zop.D + /// svint64_t svextb[_s64]_x(svbool_t pg, svint64_t op) + /// SXTB Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; SXTB Zresult.D, Pg/M, Zop.D + /// svint64_t svextb[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; SXTB Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A SXTB ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_sxtb, EA_SCALABLE, REG_V22, REG_P1, REG_V9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_sxtb, EA_SCALABLE, REG_V22, REG_P1, REG_V9, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sxtb, EA_SCALABLE, REG_V22, REG_P1, REG_V9, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SignExtend8(Vector value) => SignExtend8(value); + + + /// SignExtendWideningLower : Unpack and extend low half + + /// + /// svint16_t svunpklo[_s16](svint8_t op) + /// SUNPKLO Zresult.H, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_CH_2A SUNPKLO ., . + /// theEmitter->emitIns_R_R(INS_sve_sunpklo, EA_SCALABLE, REG_V1, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SignExtendWideningLower(Vector value) => SignExtendWideningLower(value); + + /// + /// svint32_t svunpklo[_s32](svint16_t op) + /// SUNPKLO Zresult.S, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CH_2A SUNPKLO ., . + /// theEmitter->emitIns_R_R(INS_sve_sunpklo, EA_SCALABLE, REG_V1, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SignExtendWideningLower(Vector value) => SignExtendWideningLower(value); + + /// + /// svint64_t svunpklo[_s64](svint32_t op) + /// SUNPKLO Zresult.D, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CH_2A SUNPKLO ., . 
+ /// theEmitter->emitIns_R_R(INS_sve_sunpklo, EA_SCALABLE, REG_V1, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SignExtendWideningLower(Vector value) => SignExtendWideningLower(value); + + + /// SignExtendWideningUpper : Unpack and extend high half + + /// + /// svint16_t svunpkhi[_s16](svint8_t op) + /// SUNPKHI Zresult.H, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_CH_2A SUNPKHI ., . + /// theEmitter->emitIns_R_R(INS_sve_sunpkhi, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector SignExtendWideningUpper(Vector value) => SignExtendWideningUpper(value); + + /// + /// svint32_t svunpkhi[_s32](svint16_t op) + /// SUNPKHI Zresult.S, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CH_2A SUNPKHI ., . + /// theEmitter->emitIns_R_R(INS_sve_sunpkhi, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector SignExtendWideningUpper(Vector value) => SignExtendWideningUpper(value); + + /// + /// svint64_t svunpkhi[_s64](svint32_t op) + /// SUNPKHI Zresult.D, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_CH_2A SUNPKHI ., . + /// theEmitter->emitIns_R_R(INS_sve_sunpkhi, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector SignExtendWideningUpper(Vector value) => SignExtendWideningUpper(value); + + + /// Splice : Splice two vectors under predicate control + + /// + /// svint8_t svsplice[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// SPLICE Ztied1.B, Pg, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; SPLICE Zresult.B, Pg, Zresult.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_CV_3A SPLICE ., , {., .} + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V0, REG_P0, REG_V30, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V3, REG_P7, REG_V27, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// IF_SVE_CV_3B SPLICE ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V1, REG_P1, REG_V29, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V2, REG_P6, REG_V28, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svint16_t svsplice[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// SPLICE Ztied1.H, Pg, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SPLICE Zresult.H, Pg, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_CV_3A SPLICE ., , {., .} + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V0, REG_P0, REG_V30, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V3, REG_P7, REG_V27, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// IF_SVE_CV_3B SPLICE ., , ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V1, REG_P1, REG_V29, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V2, REG_P6, REG_V28, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svint32_t svsplice[_s32](svbool_t pg, svint32_t op1, svint32_t op2) + /// SPLICE Ztied1.S, Pg, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SPLICE Zresult.S, Pg, Zresult.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_CV_3A SPLICE ., , {., .} + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V0, REG_P0, REG_V30, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V3, REG_P7, REG_V27, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// IF_SVE_CV_3B SPLICE ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V1, REG_P1, REG_V29, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V2, REG_P6, REG_V28, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svint64_t svsplice[_s64](svbool_t pg, svint64_t op1, svint64_t op2) + /// SPLICE Ztied1.D, Pg, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SPLICE Zresult.D, Pg, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CV_3A SPLICE ., , {., .} + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V0, REG_P0, REG_V30, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V3, REG_P7, REG_V27, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// IF_SVE_CV_3B SPLICE ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V1, REG_P1, REG_V29, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V2, REG_P6, REG_V28, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svuint8_t svsplice[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// SPLICE Ztied1.B, Pg, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; SPLICE Zresult.B, Pg, Zresult.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_CV_3A SPLICE ., , {., .} + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V0, REG_P0, REG_V30, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V3, REG_P7, REG_V27, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// IF_SVE_CV_3B SPLICE ., , ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V1, REG_P1, REG_V29, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V2, REG_P6, REG_V28, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svuint16_t svsplice[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// SPLICE Ztied1.H, Pg, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SPLICE Zresult.H, Pg, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_CV_3A SPLICE ., , {., .} + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V0, REG_P0, REG_V30, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V3, REG_P7, REG_V27, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// IF_SVE_CV_3B SPLICE ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V1, REG_P1, REG_V29, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V2, REG_P6, REG_V28, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svuint32_t svsplice[_u32](svbool_t pg, svuint32_t op1, svuint32_t op2) + /// SPLICE Ztied1.S, Pg, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SPLICE Zresult.S, Pg, Zresult.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_CV_3A SPLICE ., , {., .} + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V0, REG_P0, REG_V30, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V3, REG_P7, REG_V27, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// IF_SVE_CV_3B SPLICE ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V1, REG_P1, REG_V29, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V2, REG_P6, REG_V28, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svuint64_t svsplice[_u64](svbool_t pg, svuint64_t op1, svuint64_t op2) + /// SPLICE Ztied1.D, Pg, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SPLICE Zresult.D, Pg, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CV_3A SPLICE ., , {., .} + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V0, REG_P0, REG_V30, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V3, REG_P7, REG_V27, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// IF_SVE_CV_3B SPLICE ., , ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V1, REG_P1, REG_V29, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V2, REG_P6, REG_V28, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svfloat32_t svsplice[_f32](svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// SPLICE Ztied1.S, Pg, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SPLICE Zresult.S, Pg, Zresult.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_CV_3A SPLICE ., , {., .} + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V0, REG_P0, REG_V30, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V3, REG_P7, REG_V27, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// IF_SVE_CV_3B SPLICE ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V1, REG_P1, REG_V29, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V2, REG_P6, REG_V28, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + /// + /// svfloat64_t svsplice[_f64](svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// SPLICE Ztied1.D, Pg, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SPLICE Zresult.D, Pg, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_CV_3A SPLICE ., , {., .} + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V0, REG_P0, REG_V30, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V3, REG_P7, REG_V27, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// IF_SVE_CV_3B SPLICE ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V1, REG_P1, REG_V29, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V2, REG_P6, REG_V28, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + + /// Sqrt : Square root + + /// + /// svfloat32_t svsqrt[_f32]_m(svfloat32_t inactive, svbool_t pg, svfloat32_t op) + /// FSQRT Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; FSQRT Zresult.S, Pg/M, Zop.S + /// svfloat32_t svsqrt[_f32]_x(svbool_t pg, svfloat32_t op) + /// FSQRT Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; FSQRT Zresult.S, Pg/M, Zop.S + /// svfloat32_t svsqrt[_f32]_z(svbool_t pg, svfloat32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FSQRT Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_HR_3A FSQRT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_fsqrt, EA_SCALABLE, REG_V6, REG_P6, REG_V6, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Sqrt(Vector value) => Sqrt(value); + + /// + /// svfloat64_t svsqrt[_f64]_m(svfloat64_t inactive, svbool_t pg, svfloat64_t op) + /// FSQRT Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; FSQRT Zresult.D, Pg/M, Zop.D + /// svfloat64_t svsqrt[_f64]_x(svbool_t pg, svfloat64_t op) + /// FSQRT Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; FSQRT Zresult.D, Pg/M, Zop.D + /// svfloat64_t svsqrt[_f64]_z(svbool_t pg, svfloat64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FSQRT Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_HR_3A FSQRT ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_fsqrt, EA_SCALABLE, REG_V6, REG_P6, REG_V6, INS_OPTS_SCALABLE_S);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector Sqrt(Vector value) => Sqrt(value);
+
+
+ /// Store : Non-truncating store
+
+ ///
+ /// void svst1[_s8](svbool_t pg, int8_t *base, svint8_t data)
+ /// ST1B Zdata.B, Pg, [Xarray, Xindex]
+ /// ST1B Zdata.B, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JD_4A ST1B {.}, , [, ]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_R0, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_R2, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P5, REG_R7, REG_R4, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P0, REG_R1, REG_R2, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JK_4A ST1B {.D }, , [, .D, ]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_JK_4A_B ST1B {.S }, , [, .S, ]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_JN_3A ST1B {.}, , [{, #, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 5, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 4, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 2, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 0, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JK_4B ST1B {.D }, , [, .D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P3, REG_R0, REG_V4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JI_3A_A ST1B {.S }, , [.S{, #}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe void Store(Vector mask, sbyte* address, Vector data) => Store(mask, address, data);
+
+ ///
+ /// void svst2[_s8](svbool_t pg, int8_t *base, svint8x2_t data)
+ /// ST2B {Zdata0.B, Zdata1.B}, Pg, [Xarray, Xindex]
+ /// ST2B {Zdata0.B, Zdata1.B}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST2B {.B, .B }, , [{, #, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st2b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -16, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_JC_4A ST2B {.B, .B }, , [, ]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st2b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R4, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe void Store(Vector mask, sbyte* address, (Vector Value1, Vector Value2) data) => Store(mask, address, data);
+
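+ // Editorial sketch, not generated output: the two-vector Store above maps to ST2B,
+ // which writes its operands interleaved in memory. A scalar model of that layout for
+ // a fully active predicate; the helper name is hypothetical.
+ private static void StoreTwoInterleavedModel(sbyte[] value1, sbyte[] value2, sbyte[] destination)
+ {
+     for (int i = 0; i < value1.Length; i++)
+     {
+         destination[2 * i] = value1[i];     // element i of the first vector...
+         destination[2 * i + 1] = value2[i]; // ...then element i of the second
+     }
+ }
+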
+ ///
+ /// void svst3[_s8](svbool_t pg, int8_t *base, svint8x3_t data)
+ /// ST3B {Zdata0.B - Zdata2.B}, Pg, [Xarray, Xindex]
+ /// ST3B {Zdata0.B - Zdata2.B}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST3B {.B, .B, .B }, , [{, #,
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st3b, EA_SCALABLE, REG_V7, REG_P6, REG_R5, -24, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_JC_4A ST3B {.B, .B, .B }, , [, ]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st3b, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_R4, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe void Store(Vector mask, sbyte* address, (Vector Value1, Vector Value2, Vector Value3) data) => Store(mask, address, data);
+
+ ///
+ /// void svst4[_s8](svbool_t pg, int8_t *base, svint8x4_t data)
+ /// ST4B {Zdata0.B - Zdata3.B}, Pg, [Xarray, Xindex]
+ /// ST4B {Zdata0.B - Zdata3.B}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST4B {.B, .B, .B, .B }, , [{,
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st4b, EA_SCALABLE, REG_V0, REG_P0, REG_R0, -32, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_JC_4A ST4B {.B, .B, .B, .B }, , [,
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st4b, EA_SCALABLE, REG_V0, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe void Store(Vector mask, sbyte* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) => Store(mask, address, data);
+
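+ // Editorial sketch, not generated output: a scalar model of the predicated ST1-style
+ // store used by the single-vector Store overloads below. Only active lanes are
+ // written; inactive lanes leave the destination untouched. The helper name is
+ // hypothetical.
+ private static unsafe void StorePredicatedModel(bool[] mask, sbyte* address, sbyte[] data)
+ {
+     for (int i = 0; i < data.Length; i++)
+     {
+         if (mask[i])
+         {
+             address[i] = data[i]; // inactive elements are skipped, not zeroed
+         }
+     }
+ }
+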
+ ///
+ /// void svst1[_s16](svbool_t pg, int16_t *base, svint16_t data)
+ /// ST1H Zdata.H, Pg, [Xarray, Xindex, LSL #1]
+ /// ST1H Zdata.H, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JD_4A ST1H {.}, , [, , LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P6, REG_R1, REG_R2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JJ_4A ST1H {.S }, , [, .S, #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_JJ_4A_B ST1H {.D }, , [, .D, #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_JJ_4A_C ST1H {.D }, , [, .D, ]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_JJ_4A_D ST1H {.S }, , [, .S, ]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_SXTW);
+ /// IF_SVE_JN_3A ST1H {.}, , [{, #, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 3, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, -2, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JJ_4B ST1H {.D }, , [, .D, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JJ_4B_E ST1H {.D }, , [, .D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JI_3A_A ST1H {.S }, , [.S{, #}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe void Store(Vector mask, short* address, Vector data) => Store(mask, address, data);
+
+ ///
+ /// void svst2[_s16](svbool_t pg, int16_t *base, svint16x2_t data)
+ /// ST2H {Zdata0.H, Zdata1.H}, Pg, [Xarray, Xindex, LSL #1]
+ /// ST2H {Zdata0.H, Zdata1.H}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST2H {.H, .H }, , [{, #, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st2h, EA_SCALABLE, REG_V6, REG_P7, REG_R8, -16, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_JC_4A ST2H {.H, .H }, , [, , LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st2h, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_R6, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ ///
+ public static unsafe void Store(Vector mask, short* address, (Vector Value1, Vector Value2) data) => Store(mask, address, data);
+
+ ///
+ /// void svst3[_s16](svbool_t pg, int16_t *base, svint16x3_t data)
+ /// ST3H {Zdata0.H - Zdata2.H}, Pg, [Xarray, Xindex, LSL #1]
+ /// ST3H {Zdata0.H - Zdata2.H}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST3H {.H, .H, .H }, , [{, #,
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st3h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -24, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_JC_4A ST3H {.H, .H, .H }, , [, ,
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st3h, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ ///
+ public static unsafe void Store(Vector mask, short* address, (Vector Value1, Vector Value2, Vector Value3) data) => Store(mask, address, data);
+
+ ///
+ /// void svst4[_s16](svbool_t pg, int16_t *base, svint16x4_t data)
+ /// ST4H {Zdata0.H - Zdata3.H}, Pg, [Xarray, Xindex, LSL #1]
+ /// ST4H {Zdata0.H - Zdata3.H}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST4H {.H, .H, .H, .H }, , [{,
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st4h, EA_SCALABLE, REG_V3, REG_P5, REG_R2, -32, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_JC_4A ST4H {.H, .H, .H, .H }, ,
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st4h, EA_SCALABLE, REG_V1, REG_P0, REG_R9, REG_R8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ ///
+ public static unsafe void Store(Vector mask, short* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) => Store(mask, address, data);
+
+ ///
+ /// void svst1[_s32](svbool_t pg, int32_t *base, svint32_t data)
+ /// ST1W Zdata.S, Pg, [Xarray, Xindex, LSL #2]
+ /// ST1W Zdata.S, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JN_3C ST1W {.Q }, , [{, #, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q);
+ /// IF_SVE_JD_4B ST1W {.}, , [, , LSL #2]
+ ///
theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1W {.S }, , [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1W {.D }, , [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1W {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JJ_4A_D ST1W {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3B ST1W {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_JD_4C ST1W {.Q }, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B ST1W {.D }, , [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_E ST1W {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1W {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe void Store(Vector mask, int* address, Vector data) => Store(mask, address, data); + + /// + /// void svst2[_s32](svbool_t pg, int32_t *base, svint32x2_t data) + /// ST2W {Zdata0.S, Zdata1.S}, Pg, [Xarray, Xindex, LSL #2] + /// ST2W {Zdata0.S, Zdata1.S}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST2W {.S, .S }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st2w, EA_SCALABLE, REG_V8, REG_P1, REG_R9, -16, INS_OPTS_SCALABLE_S); + /// IF_SVE_JC_4A ST2W {.S, .S }, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st2w, EA_SCALABLE, REG_V0, REG_P2, REG_R8, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, int* 
address, (Vector Value1, Vector Value2) data) => Store(mask, address, data);
+
+ ///
+ /// void svst3[_s32](svbool_t pg, int32_t *base, svint32x3_t data)
+ /// ST3W {Zdata0.S - Zdata2.S}, Pg, [Xarray, Xindex, LSL #2]
+ /// ST3W {Zdata0.S - Zdata2.S}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST3W {.S, .S, .S }, , [{, #,
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st3w, EA_SCALABLE, REG_V1, REG_P3, REG_R8, -24, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_JC_4A ST3W {.S, .S, .S }, , [, ,
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st3w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ ///
+ public static unsafe void Store(Vector mask, int* address, (Vector Value1, Vector Value2, Vector Value3) data) => Store(mask, address, data);
+
+ ///
+ /// void svst4[_s32](svbool_t pg, int32_t *base, svint32x4_t data)
+ /// ST4W {Zdata0.S - Zdata3.S}, Pg, [Xarray, Xindex, LSL #2]
+ /// ST4W {Zdata0.S - Zdata3.S}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST4W {.S, .S, .S, .S }, , [{,
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st4w, EA_SCALABLE, REG_V0, REG_P1, REG_R5, 28, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st4w, EA_SCALABLE, REG_V31, REG_P1, REG_R5, 28, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_JC_4A ST4W {.S, .S, .S, .S }, ,
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st4w, EA_SCALABLE, REG_V0, REG_P1, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ ///
+ public static unsafe void Store(Vector mask, int* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) => Store(mask, address, data);
+
+ ///
+ /// void svst1[_s64](svbool_t pg, int64_t *base, svint64_t data)
+ /// ST1D Zdata.D, Pg, [Xarray, Xindex, LSL #3]
+ /// ST1D Zdata.D, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JN_3C ST1D {.D }, , [{, #, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JN_3C_D ST1D {.Q }, , [{, #, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V2, REG_P1, REG_R0, 0, INS_OPTS_SCALABLE_Q);
+ /// IF_SVE_JJ_4A ST1D {.D }, , [, .D, #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N);
+ /// IF_SVE_JJ_4A_B ST1D {.D }, , [, .D, ]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P1, REG_R2, REG_V5, INS_OPTS_SCALABLE_D_UXTW);
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P1, REG_R2, REG_V5, INS_OPTS_SCALABLE_D_SXTW);
+ /// IF_SVE_JD_4C ST1D {.D }, , [, , LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V1, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JD_4C_A ST1D {.Q }, , [, , LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P5, REG_R6, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JJ_4B ST1D {.D }, , [, .D, LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JJ_4B_C ST1D {.D }, , [, .D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V6, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JL_3A ST1D {.D }, , [.D{, #}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P7, REG_V4, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P7, REG_V4, 248, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe void Store(Vector mask, long* address, Vector data) => Store(mask, address, data);
+
+ ///
+ /// void svst2[_s64](svbool_t pg, int64_t *base, svint64x2_t data)
+ /// ST2D {Zdata0.D, Zdata1.D}, Pg, [Xarray, Xindex, LSL #3]
+ /// ST2D {Zdata0.D, Zdata1.D}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST2D {.D, .D }, , [{, #, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st2d, EA_SCALABLE, REG_V5, REG_P4, REG_R3, -16, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JC_4A ST2D {.D, .D }, , [, , LSL #3]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st2d, EA_SCALABLE, REG_V1, REG_P7, REG_R6, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///
+ public static unsafe void Store(Vector mask, long* address, (Vector Value1, Vector Value2) data) => Store(mask, address, data);
+
+ ///
+ /// void svst3[_s64](svbool_t pg, int64_t *base, svint64x3_t data)
+ /// ST3D {Zdata0.D - Zdata2.D}, Pg, [Xarray, Xindex, LSL #3]
+ /// ST3D {Zdata0.D - Zdata2.D}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST3D {.D, .D, .D }, , [{, #,
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st3d, EA_SCALABLE, REG_V2, REG_P3, REG_R4, -24, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JC_4A ST3D {.D, .D, .D }, , [, ,
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st3d, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///
+ public static unsafe void Store(Vector mask, long* address, (Vector Value1, Vector Value2, Vector Value3) data) => Store(mask, address, data);
+
+ ///
+ /// void svst4[_s64](svbool_t pg, int64_t *base, svint64x4_t data)
+ /// ST4D {Zdata0.D - Zdata3.D}, Pg, [Xarray, Xindex, LSL #3]
+ /// ST4D {Zdata0.D - Zdata3.D}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST4D {.D, .D, .D, .D }, , [{,
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st4d, EA_SCALABLE, REG_V2, REG_P0, REG_R1, -32, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JC_4A ST4D {.D, .D, .D, .D }, ,
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st4d, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ ///
+ public static unsafe void Store(Vector mask, long* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) => Store(mask, address, data);
+
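+ // Editorial sketch, not generated output: among the ST1D forms listed above is a
+ // vector-of-addresses variant ([.D{, #}]), i.e. a scatter store. A scalar model,
+ // assuming the immediate is an element index scaled by the element size; the helper
+ // name is hypothetical.
+ private static unsafe void ScatterStoreModel(bool[] mask, ulong[] addresses, long[] data, int index)
+ {
+     for (int i = 0; i < data.Length; i++)
+     {
+         if (mask[i])
+         {
+             *(long*)(addresses[i] + (ulong)(index * sizeof(long))) = data[i];
+         }
+     }
+ }
+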
theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1B {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JK_4B ST1B {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P3, REG_R0, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1B {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe void Store(Vector mask, byte* address, Vector data) => Store(mask, address, data); + + /// + /// void svst2[_u8](svbool_t pg, uint8_t *base, svuint8x2_t data) + /// ST2B {Zdata0.B, Zdata1.B}, Pg, [Xarray, Xindex] + /// ST2B {Zdata0.B, Zdata1.B}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST2B {.B, .B }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st2b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -16, INS_OPTS_SCALABLE_B); + /// IF_SVE_JC_4A ST2B {.B, .B }, , [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st2b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R4, INS_OPTS_SCALABLE_B); + /// + public static unsafe void Store(Vector mask, byte* address, (Vector Value1, Vector Value2) data) => Store(mask, address, Value1,); + + /// + /// void svst3[_u8](svbool_t pg, uint8_t *base, svuint8x3_t data) + /// ST3B {Zdata0.B - Zdata2.B}, Pg, [Xarray, Xindex] + /// ST3B {Zdata0.B - Zdata2.B}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST3B {.B, .B, .B }, , [{, #, + /// theEmitter->emitIns_R_R_R_I(INS_sve_st3b, EA_SCALABLE, REG_V7, REG_P6, REG_R5, -24, INS_OPTS_SCALABLE_B); + /// IF_SVE_JC_4A ST3B {.B, .B, .B }, , [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st3b, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_R4, INS_OPTS_SCALABLE_B); + /// + public static unsafe void Store(Vector mask, byte* address, (Vector Value1, Vector Value2, Vector Value3) data) => Store(mask, address, Value1,); + + /// + /// void svst4[_u8](svbool_t pg, uint8_t *base, svuint8x4_t data) + /// ST4B {Zdata0.B - Zdata3.B}, Pg, [Xarray, Xindex] + /// ST4B {Zdata0.B - Zdata3.B}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST4B {.B, .B, .B, .B }, , [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_st4b, EA_SCALABLE, REG_V0, REG_P0, REG_R0, -32, INS_OPTS_SCALABLE_B); + /// IF_SVE_JC_4A ST4B {.B, .B, .B, .B }, , [, + /// theEmitter->emitIns_R_R_R_R(INS_sve_st4b, EA_SCALABLE, REG_V0, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_B); + /// + public static unsafe void Store(Vector mask, byte* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) => Store(mask, address, 
Value1,); + + /// + /// void svst1[_u16](svbool_t pg, uint16_t *base, svuint16_t data) + /// ST1H Zdata.H, Pg, [Xarray, Xindex, LSL #1] + /// ST1H Zdata.H, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1H {.}, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P6, REG_R1, REG_R2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1H {.S }, , [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1H {.D }, , [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1H {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JJ_4A_D ST1H {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1H {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, -2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JJ_4B ST1H {.D }, , [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_E ST1H {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1H {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_D); + /// + public static unsafe void Store(Vector mask, ushort* address, Vector data) => Store(mask, address, data); + + /// + /// void svst2[_u16](svbool_t pg, uint16_t *base, svuint16x2_t data) + /// ST2H {Zdata0.H, Zdata1.H}, Pg, [Xarray, Xindex, LSL #1] + /// ST2H {Zdata0.H, Zdata1.H}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST2H {.H, .H }, , [{, #, MUL VL}] + /// 
theEmitter->emitIns_R_R_R_I(INS_sve_st2h, EA_SCALABLE, REG_V6, REG_P7, REG_R8, -16, INS_OPTS_SCALABLE_H); + /// IF_SVE_JC_4A ST2H {.H, .H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st2h, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_R6, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, ushort* address, (Vector Value1, Vector Value2) data) => Store(mask, address, data); + + /// + /// void svst3[_u16](svbool_t pg, uint16_t *base, svuint16x3_t data) + /// ST3H {Zdata0.H - Zdata2.H}, Pg, [Xarray, Xindex, LSL #1] + /// ST3H {Zdata0.H - Zdata2.H}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST3H {.H, .H, .H }, , [{, #, + /// theEmitter->emitIns_R_R_R_I(INS_sve_st3h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -24, INS_OPTS_SCALABLE_H); + /// IF_SVE_JC_4A ST3H {.H, .H, .H }, , [, , + /// theEmitter->emitIns_R_R_R_R(INS_sve_st3h, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, ushort* address, (Vector Value1, Vector Value2, Vector Value3) data) => Store(mask, address, data); + + /// + /// void svst4[_u16](svbool_t pg, uint16_t *base, svuint16x4_t data) + /// ST4H {Zdata0.H - Zdata3.H}, Pg, [Xarray, Xindex, LSL #1] + /// ST4H {Zdata0.H - Zdata3.H}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST4H {.H, .H, .H, .H }, , [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_st4h, EA_SCALABLE, REG_V3, REG_P5, REG_R2, -32, INS_OPTS_SCALABLE_H); + /// IF_SVE_JC_4A ST4H {.H, .H, .H, .H }, , + /// theEmitter->emitIns_R_R_R_R(INS_sve_st4h, EA_SCALABLE, REG_V1, REG_P0, REG_R9, REG_R8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, ushort* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) => Store(mask, address, data); + + /// + /// void svst1[_u32](svbool_t pg, uint32_t *base, svuint32_t data) + /// ST1W Zdata.S, Pg, [Xarray, Xindex, LSL #2] + /// ST1W Zdata.S, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JN_3C ST1W {.Q }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q); + /// IF_SVE_JD_4B ST1W {.}, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1W {.S }, , [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1W {.D }, , [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1W {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + 
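For review purposes, a minimal usage sketch of the tuple Store overloads above. It is illustrative only: it assumes this proposed Sve class ships under System.Runtime.Intrinsics.Arm with the signatures shown in this file, and that CreateTrueMaskUInt16 (from the mask helpers elsewhere in the proposal) is available; names may change before the API is approved.

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    static unsafe void StoreInterleavedPairs(ushort* dest, Vector<ushort> a, Vector<ushort> b)
    {
        if (!Sve.IsSupported)
        {
            return; // a real caller would take a scalar fallback path here
        }
        // All-active predicate: every lane takes part in the store.
        Vector<ushort> mask = Sve.CreateTrueMaskUInt16();
        // Lowers to ST2H: writes a[0], b[0], a[1], b[1], ... so dest must have
        // room for two full vectors (2 * Vector<ushort>.Count elements).
        Sve.Store(mask, dest, (a, b));
    }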
/// IF_SVE_JJ_4A_D ST1W {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3B ST1W {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_JD_4C ST1W {.Q }, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B ST1W {.D }, , [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_E ST1W {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1W {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe void Store(Vector mask, uint* address, Vector data) => Store(mask, address, data); + + /// + /// void svst2[_u32](svbool_t pg, uint32_t *base, svuint32x2_t data) + /// ST2W {Zdata0.S, Zdata1.S}, Pg, [Xarray, Xindex, LSL #2] + /// ST2W {Zdata0.S, Zdata1.S}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST2W {.S, .S }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st2w, EA_SCALABLE, REG_V8, REG_P1, REG_R9, -16, INS_OPTS_SCALABLE_S); + /// IF_SVE_JC_4A ST2W {.S, .S }, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st2w, EA_SCALABLE, REG_V0, REG_P2, REG_R8, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, uint* address, (Vector Value1, Vector Value2) data) => Store(mask, address, Value1,); + + /// + /// void svst3[_u32](svbool_t pg, uint32_t *base, svuint32x3_t data) + /// ST3W {Zdata0.S - Zdata2.S}, Pg, [Xarray, Xindex, LSL #2] + /// ST3W {Zdata0.S - Zdata2.S}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST3W {.S, .S, .S }, , [{, #, + /// theEmitter->emitIns_R_R_R_I(INS_sve_st3w, EA_SCALABLE, REG_V1, REG_P3, REG_R8, -24, INS_OPTS_SCALABLE_S); + /// IF_SVE_JC_4A ST3W {.S, .S, .S }, , [, , + /// theEmitter->emitIns_R_R_R_R(INS_sve_st3w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, uint* address, (Vector Value1, Vector Value2, Vector Value3) data) => Store(mask, address, Value1,); + + /// + /// void svst4[_u32](svbool_t pg, uint32_t *base, svuint32x4_t data) + /// ST4W {Zdata0.S - Zdata3.S}, Pg, [Xarray, Xindex, LSL #2] + /// ST4W {Zdata0.S - Zdata3.S}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST4W {.S, .S, .S, .S }, , [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_st4w, EA_SCALABLE, REG_V0, REG_P1, REG_R5, 28, INS_OPTS_SCALABLE_S); + /// 
theEmitter->emitIns_R_R_R_I(INS_sve_st4w, EA_SCALABLE, REG_V31, REG_P1, REG_R5, 28, INS_OPTS_SCALABLE_S); + /// IF_SVE_JC_4A ST4W {.S, .S, .S, .S }, , + /// theEmitter->emitIns_R_R_R_R(INS_sve_st4w, EA_SCALABLE, REG_V0, REG_P1, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, uint* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) => Store(mask, address, Value1,); + + /// + /// void svst1[_u64](svbool_t pg, uint64_t *base, svuint64_t data) + /// ST1D Zdata.D, Pg, [Xarray, Xindex, LSL #3] + /// ST1D Zdata.D, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JN_3C ST1D {.D }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_JN_3C_D ST1D {.Q }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V2, REG_P1, REG_R0, 0, INS_OPTS_SCALABLE_Q); + /// IF_SVE_JJ_4A ST1D {.D }, , [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1D {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P1, REG_R2, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P1, REG_R2, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JD_4C ST1D {.D }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V1, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JD_4C_A ST1D {.Q }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P5, REG_R6, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B ST1D {.D }, , [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_C ST1D {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V6, INS_OPTS_SCALABLE_D); + /// IF_SVE_JL_3A ST1D {.D }, , [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P7, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P7, REG_V4, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe void Store(Vector mask, ulong* address, Vector data) => Store(mask, address, data); + + /// + /// void svst2[_u64](svbool_t pg, uint64_t *base, svuint64x2_t data) + /// ST2D {Zdata0.D, Zdata1.D}, Pg, [Xarray, Xindex, LSL #3] + /// ST2D {Zdata0.D, Zdata1.D}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST2D {.D, .D }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st2d, EA_SCALABLE, REG_V5, REG_P4, REG_R3, -16, INS_OPTS_SCALABLE_D); + /// IF_SVE_JC_4A ST2D {.D, .D }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st2d, EA_SCALABLE, REG_V1, REG_P7, REG_R6, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, ulong* address, (Vector Value1, Vector Value2) data) => Store(mask, address, Value1,); + + /// + /// void svst3[_u64](svbool_t pg, uint64_t *base, svuint64x3_t data) + /// ST3D {Zdata0.D - Zdata2.D}, Pg, [Xarray, Xindex, 
LSL #3] + /// ST3D {Zdata0.D - Zdata2.D}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST3D {.D, .D, .D }, , [{, #, + /// theEmitter->emitIns_R_R_R_I(INS_sve_st3d, EA_SCALABLE, REG_V2, REG_P3, REG_R4, -24, INS_OPTS_SCALABLE_D); + /// IF_SVE_JC_4A ST3D {.D, .D, .D }, , [, , + /// theEmitter->emitIns_R_R_R_R(INS_sve_st3d, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, ulong* address, (Vector Value1, Vector Value2, Vector Value3) data) => Store(mask, address, Value1,); + + /// + /// void svst4[_u64](svbool_t pg, uint64_t *base, svuint64x4_t data) + /// ST4D {Zdata0.D - Zdata3.D}, Pg, [Xarray, Xindex, LSL #3] + /// ST4D {Zdata0.D - Zdata3.D}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST4D {.D, .D, .D, .D }, , [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_st4d, EA_SCALABLE, REG_V2, REG_P0, REG_R1, -32, INS_OPTS_SCALABLE_D); + /// IF_SVE_JC_4A ST4D {.D, .D, .D, .D }, , + /// theEmitter->emitIns_R_R_R_R(INS_sve_st4d, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, ulong* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) => Store(mask, address, Value1,); + + /// + /// void svst1[_f32](svbool_t pg, float32_t *base, svfloat32_t data) + /// ST1W Zdata.S, Pg, [Xarray, Xindex, LSL #2] + /// ST1W Zdata.S, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JN_3C ST1W {.Q }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q); + /// IF_SVE_JD_4B ST1W {.}, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1W {.S }, , [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1W {.D }, , [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1W {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JJ_4A_D ST1W {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3B ST1W {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_JD_4C 
ST1W {.Q }, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B ST1W {.D }, , [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_E ST1W {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1W {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe void Store(Vector mask, float* address, Vector data) => Store(mask, address, data); + + /// + /// void svst2[_f32](svbool_t pg, float32_t *base, svfloat32x2_t data) + /// ST2W {Zdata0.S, Zdata1.S}, Pg, [Xarray, Xindex, LSL #2] + /// ST2W {Zdata0.S, Zdata1.S}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST2W {.S, .S }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st2w, EA_SCALABLE, REG_V8, REG_P1, REG_R9, -16, INS_OPTS_SCALABLE_S); + /// IF_SVE_JC_4A ST2W {.S, .S }, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st2w, EA_SCALABLE, REG_V0, REG_P2, REG_R8, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, float* address, (Vector Value1, Vector Value2) data) => Store(mask, address, Value1,); + + /// + /// void svst3[_f32](svbool_t pg, float32_t *base, svfloat32x3_t data) + /// ST3W {Zdata0.S - Zdata2.S}, Pg, [Xarray, Xindex, LSL #2] + /// ST3W {Zdata0.S - Zdata2.S}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST3W {.S, .S, .S }, , [{, #, + /// theEmitter->emitIns_R_R_R_I(INS_sve_st3w, EA_SCALABLE, REG_V1, REG_P3, REG_R8, -24, INS_OPTS_SCALABLE_S); + /// IF_SVE_JC_4A ST3W {.S, .S, .S }, , [, , + /// theEmitter->emitIns_R_R_R_R(INS_sve_st3w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, float* address, (Vector Value1, Vector Value2, Vector Value3) data) => Store(mask, address, Value1,); + + /// + /// void svst4[_f32](svbool_t pg, float32_t *base, svfloat32x4_t data) + /// ST4W {Zdata0.S - Zdata3.S}, Pg, [Xarray, Xindex, LSL #2] + /// ST4W {Zdata0.S - Zdata3.S}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST4W {.S, .S, .S, .S }, , [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_st4w, EA_SCALABLE, REG_V0, REG_P1, REG_R5, 28, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st4w, EA_SCALABLE, REG_V31, REG_P1, REG_R5, 28, INS_OPTS_SCALABLE_S); + /// IF_SVE_JC_4A ST4W {.S, .S, .S, .S }, , + /// theEmitter->emitIns_R_R_R_R(INS_sve_st4w, EA_SCALABLE, REG_V0, REG_P1, REG_R4, REG_R5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, float* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) => Store(mask, address, Value1,); + + /// + /// void svst1[_f64](svbool_t pg, float64_t *base, svfloat64_t data) + /// ST1D Zdata.D, Pg, [Xarray, Xindex, LSL 
#3] + /// ST1D Zdata.D, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JN_3C ST1D {.D }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_D); + /// IF_SVE_JN_3C_D ST1D {.Q }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V2, REG_P1, REG_R0, 0, INS_OPTS_SCALABLE_Q); + /// IF_SVE_JJ_4A ST1D {.D }, , [, .D, #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1D {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P1, REG_R2, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P1, REG_R2, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JD_4C ST1D {.D }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V1, REG_P4, REG_R5, REG_R6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JD_4C_A ST1D {.Q }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P5, REG_R6, REG_R1, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B ST1D {.D }, , [, .D, LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V0, REG_P3, REG_R2, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_C ST1D {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P4, REG_R5, REG_V6, INS_OPTS_SCALABLE_D); + /// IF_SVE_JL_3A ST1D {.D }, , [.D{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P7, REG_V4, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1d, EA_SCALABLE, REG_V3, REG_P7, REG_V4, 248, INS_OPTS_SCALABLE_D); + /// + public static unsafe void Store(Vector mask, double* address, Vector data) => Store(mask, address, data); + + /// + /// void svst2[_f64](svbool_t pg, float64_t *base, svfloat64x2_t data) + /// ST2D {Zdata0.D, Zdata1.D}, Pg, [Xarray, Xindex, LSL #3] + /// ST2D {Zdata0.D, Zdata1.D}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST2D {.D, .D }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st2d, EA_SCALABLE, REG_V5, REG_P4, REG_R3, -16, INS_OPTS_SCALABLE_D); + /// IF_SVE_JC_4A ST2D {.D, .D }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st2d, EA_SCALABLE, REG_V1, REG_P7, REG_R6, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, double* address, (Vector Value1, Vector Value2) data) => Store(mask, address, Value1,); + + /// + /// void svst3[_f64](svbool_t pg, float64_t *base, svfloat64x3_t data) + /// ST3D {Zdata0.D - Zdata2.D}, Pg, [Xarray, Xindex, LSL #3] + /// ST3D {Zdata0.D - Zdata2.D}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST3D {.D, .D, .D }, , [{, #, + /// theEmitter->emitIns_R_R_R_I(INS_sve_st3d, EA_SCALABLE, REG_V2, REG_P3, REG_R4, -24, INS_OPTS_SCALABLE_D); + /// IF_SVE_JC_4A ST3D {.D, .D, .D }, , [, , + /// theEmitter->emitIns_R_R_R_R(INS_sve_st3d, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, double* address, (Vector Value1, Vector Value2, Vector Value3) data) => Store(mask, 
address, data); + + /// + /// void svst4[_f64](svbool_t pg, float64_t *base, svfloat64x4_t data) + /// ST4D {Zdata0.D - Zdata3.D}, Pg, [Xarray, Xindex, LSL #3] + /// ST4D {Zdata0.D - Zdata3.D}, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JO_3A ST4D {.D, .D, .D, .D }, , [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_st4d, EA_SCALABLE, REG_V2, REG_P0, REG_R1, -32, INS_OPTS_SCALABLE_D); + /// IF_SVE_JC_4A ST4D {.D, .D, .D, .D }, , + /// theEmitter->emitIns_R_R_R_R(INS_sve_st4d, EA_SCALABLE, REG_V5, REG_P2, REG_R1, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Store(Vector mask, double* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) => Store(mask, address, data); + + + /// StoreNarrowing : Truncate and store + + /// + /// void svst1b[_s16](svbool_t pg, int8_t *base, svint16_t data) + /// ST1B Zdata.H, Pg, [Xarray, Xindex] + /// ST1B Zdata.H, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1B {.}, , [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_R0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_R2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P5, REG_R7, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P0, REG_R1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JK_4A ST1B {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JK_4A_B ST1B {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1B {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JK_4B ST1B {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P3, REG_R0, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1B {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe void StoreNarrowing(Vector mask, sbyte* address, Vector data) => StoreNarrowing(mask, address, data); + + /// + /// void svst1b[_s32](svbool_t pg, int8_t *base, svint32_t data) + /// ST1B Zdata.S, Pg, [Xarray, Xindex] + /// ST1B Zdata.S, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1B {.}, 
, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_R0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_R2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P5, REG_R7, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P0, REG_R1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JK_4A ST1B {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JK_4A_B ST1B {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1B {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JK_4B ST1B {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P3, REG_R0, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1B {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe void StoreNarrowing(Vector mask, sbyte* address, Vector data) => StoreNarrowing(mask, address, data); + + /// + /// void svst1h[_s32](svbool_t pg, int16_t *base, svint32_t data) + /// ST1H Zdata.S, Pg, [Xarray, Xindex, LSL #1] + /// ST1H Zdata.S, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1H {.}, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P6, REG_R1, REG_R2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1H {.S }, , [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1H {.D }, , [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, 
REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1H {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JJ_4A_D ST1H {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1H {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, -2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JJ_4B ST1H {.D }, , [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_E ST1H {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1H {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_D); + /// + public static unsafe void StoreNarrowing(Vector mask, short* address, Vector data) => StoreNarrowing(mask, address, data); + + /// + /// void svst1b[_s64](svbool_t pg, int8_t *base, svint64_t data) + /// ST1B Zdata.D, Pg, [Xarray, Xindex] + /// ST1B Zdata.D, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1B {.}, , [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_R0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_R2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P5, REG_R7, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P0, REG_R1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JK_4A ST1B {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JK_4A_B ST1B {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1B {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, 
EA_SCALABLE, REG_V3, REG_P2, REG_R1, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JK_4B ST1B {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P3, REG_R0, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1B {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe void StoreNarrowing(Vector mask, sbyte* address, Vector data) => StoreNarrowing(mask, address, data); + + /// + /// void svst1h[_s64](svbool_t pg, int16_t *base, svint64_t data) + /// ST1H Zdata.D, Pg, [Xarray, Xindex, LSL #1] + /// ST1H Zdata.D, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1H {.}, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P6, REG_R1, REG_R2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1H {.S }, , [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1H {.D }, , [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1H {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JJ_4A_D ST1H {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1H {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, -2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JJ_4B ST1H {.D }, , [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_E ST1H {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, 
INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1H {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_D); + /// + public static unsafe void StoreNarrowing(Vector mask, short* address, Vector data) => StoreNarrowing(mask, address, data); + + /// + /// void svst1w[_s64](svbool_t pg, int32_t *base, svint64_t data) + /// ST1W Zdata.D, Pg, [Xarray, Xindex, LSL #2] + /// ST1W Zdata.D, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JN_3C ST1W {.Q }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q); + /// IF_SVE_JD_4B ST1W {.}, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1W {.S }, , [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1W {.D }, , [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1W {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JJ_4A_D ST1W {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3B ST1W {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_JD_4C ST1W {.Q }, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B ST1W {.D }, , [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_E ST1W {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1W {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// 
theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe void StoreNarrowing(Vector mask, int* address, Vector data) => StoreNarrowing(mask, address, data); + + /// + /// void svst1b[_u16](svbool_t pg, uint8_t *base, svuint16_t data) + /// ST1B Zdata.H, Pg, [Xarray, Xindex] + /// ST1B Zdata.H, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1B {.}, , [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_R0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_R2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P5, REG_R7, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P0, REG_R1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JK_4A ST1B {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JK_4A_B ST1B {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1B {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JK_4B ST1B {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P3, REG_R0, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1B {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe void StoreNarrowing(Vector mask, byte* address, Vector data) => StoreNarrowing(mask, address, data); + + /// + /// void svst1b[_u32](svbool_t pg, uint8_t *base, svuint32_t data) + /// ST1B Zdata.S, Pg, [Xarray, Xindex] + /// ST1B Zdata.S, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1B {.}, , [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_R0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_R2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P5, REG_R7, REG_R4, INS_OPTS_SCALABLE_S); + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P0, REG_R1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JK_4A ST1B {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JK_4A_B ST1B {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1B {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JK_4B ST1B {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P3, REG_R0, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1B {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe void StoreNarrowing(Vector mask, byte* address, Vector data) => StoreNarrowing(mask, address, data); + + /// + /// void svst1h[_u32](svbool_t pg, uint16_t *base, svuint32_t data) + /// ST1H Zdata.S, Pg, [Xarray, Xindex, LSL #1] + /// ST1H Zdata.S, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1H {.}, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P6, REG_R1, REG_R2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1H {.S }, , [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1H {.D }, , [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1H {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + 
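A hedged sketch of how the narrowing stores above might be used: each lane is truncated to the destination element width before the store, so a Vector<uint> written through a ushort* lowers to ST1H on .S-sized lanes. LoadVector and CreateWhileLessThanMask32Bit are assumed from the wider proposal and may differ in the final API.

    using System.Numerics;
    using System.Runtime.Intrinsics.Arm;

    // Copies count elements from src to dst, keeping only the low 16 bits of each.
    static unsafe void TruncateCopy(uint* src, ushort* dst, int count)
    {
        if (!Sve.IsSupported)
        {
            return; // scalar fallback elided
        }
        for (int i = 0; i < count; i += Vector<uint>.Count)
        {
            // Lanes stay active while (i + lane) < count, so the final partial
            // vector is handled by the predicate instead of a scalar tail loop.
            Vector<uint> mask = Sve.CreateWhileLessThanMask32Bit(i, count);
            Vector<uint> v = Sve.LoadVector(mask, src + i);
            Sve.StoreNarrowing(mask, dst + i, v); // ST1H: Zv.S -> 16-bit elements
        }
    }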
/// IF_SVE_JJ_4A_D ST1H {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1H {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, -2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JJ_4B ST1H {.D }, , [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_E ST1H {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1H {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_D); + /// + public static unsafe void StoreNarrowing(Vector mask, ushort* address, Vector data) => StoreNarrowing(mask, address, data); + + /// + /// void svst1b[_u64](svbool_t pg, uint8_t *base, svuint64_t data) + /// ST1B Zdata.D, Pg, [Xarray, Xindex] + /// ST1B Zdata.D, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1B {.}, , [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P1, REG_R2, REG_R0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P5, REG_R6, REG_R2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P5, REG_R7, REG_R4, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P0, REG_R1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JK_4A ST1B {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V4, REG_P2, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JK_4A_B ST1B {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V0, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1B {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V3, REG_P2, REG_R1, 0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JK_4B ST1B {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1b, EA_SCALABLE, REG_V6, REG_P3, REG_R0, REG_V4, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1B {.S }, , 
[.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1b, EA_SCALABLE, REG_V1, REG_P2, REG_V3, 31, INS_OPTS_SCALABLE_D); + /// + public static unsafe void StoreNarrowing(Vector mask, byte* address, Vector data) => StoreNarrowing(mask, address, data); + + /// + /// void svst1h[_u64](svbool_t pg, uint16_t *base, svuint64_t data) + /// ST1H Zdata.D, Pg, [Xarray, Xindex, LSL #1] + /// ST1H Zdata.D, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1H {.}, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P6, REG_R1, REG_R2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1H {.S }, , [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1H {.D }, , [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1H {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JJ_4A_D ST1H {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1H {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, -2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JJ_4B ST1H {.D }, , [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_E ST1H {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1H {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, 
REG_V2, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_D); + /// + public static unsafe void StoreNarrowing(Vector mask, ushort* address, Vector data) => StoreNarrowing(mask, address, data); + + /// + /// void svst1w[_u64](svbool_t pg, uint32_t *base, svuint64_t data) + /// ST1W Zdata.D, Pg, [Xarray, Xindex, LSL #2] + /// ST1W Zdata.D, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JN_3C ST1W {.Q }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P4, REG_R5, 6, INS_OPTS_SCALABLE_Q); + /// IF_SVE_JD_4B ST1W {.}, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1W {.S }, , [, .S, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P3, REG_R1, REG_V2, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1W {.D }, , [, .D, #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P4, REG_R2, REG_V3, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1W {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V0, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JJ_4A_D ST1W {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V2, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3B ST1W {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R3, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_JD_4C ST1W {.Q }, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P1, REG_R8, REG_R7, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B ST1W {.D }, , [, .D, LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V2, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_E ST1W {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1w, EA_SCALABLE, REG_V3, REG_P5, REG_R1, REG_V0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1W {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1w, EA_SCALABLE, REG_V5, REG_P4, REG_V1, 124, INS_OPTS_SCALABLE_D); + /// + public static unsafe void StoreNarrowing(Vector mask, uint* 
address, Vector data) => StoreNarrowing(mask, address, data); + + + /// StoreNonTemporal : Non-truncating store, non-temporal + + /// + /// void svstnt1[_s8](svbool_t pg, int8_t *base, svint8_t data) + /// STNT1B Zdata.B, Pg, [Xarray, Xindex] + /// STNT1B Zdata.B, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1B {.B }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_B); + /// IF_SVE_IZ_4A STNT1B {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V3, REG_R4, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1B {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_R8, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1B {.B }, , [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R3, INS_OPTS_SCALABLE_B); + /// + public static unsafe void StoreNonTemporal(Vector mask, sbyte* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_s16](svbool_t pg, int16_t *base, svint16_t data) + /// STNT1H Zdata.H, Pg, [Xarray, Xindex, LSL #1] + /// STNT1H Zdata.H, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void StoreNonTemporal(Vector mask, short* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_s32](svbool_t pg, int32_t *base, svint32_t data) + /// STNT1W Zdata.S, Pg, [Xarray, Xindex, LSL #2] + /// STNT1W Zdata.S, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1W {.S }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A STNT1W {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1W {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1W {.S }, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void StoreNonTemporal(Vector mask, int* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_s64](svbool_t pg, int64_t *base, svint64_t data) + /// STNT1D Zdata.D, Pg, [Xarray, Xindex, LSL #3] + /// STNT1D Zdata.D, Pg, [Xbase, #0, MUL VL] + /// + /// 
codegenarm64test: + /// IF_SVE_JM_3A STNT1D {.D }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D); + /// IF_SVE_JA_4A STNT1D {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1D {.D }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void StoreNonTemporal(Vector mask, long* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_u8](svbool_t pg, uint8_t *base, svuint8_t data) + /// STNT1B Zdata.B, Pg, [Xarray, Xindex] + /// STNT1B Zdata.B, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1B {.B }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_B); + /// IF_SVE_IZ_4A STNT1B {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V3, REG_R4, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1B {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_R8, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1B {.B }, , [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R3, INS_OPTS_SCALABLE_B); + /// + public static unsafe void StoreNonTemporal(Vector mask, byte* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_u16](svbool_t pg, uint16_t *base, svuint16_t data) + /// STNT1H Zdata.H, Pg, [Xarray, Xindex, LSL #1] + /// STNT1H Zdata.H, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void StoreNonTemporal(Vector mask, ushort* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_u32](svbool_t pg, uint32_t *base, svuint32_t data) + /// STNT1W Zdata.S, Pg, [Xarray, Xindex, LSL #2] + /// STNT1W Zdata.S, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1W {.S }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A STNT1W {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S); + /// 
IF_SVE_IZ_4A_A STNT1W {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1W {.S }, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void StoreNonTemporal(Vector mask, uint* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_u64](svbool_t pg, uint64_t *base, svuint64_t data) + /// STNT1D Zdata.D, Pg, [Xarray, Xindex, LSL #3] + /// STNT1D Zdata.D, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1D {.D }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D); + /// IF_SVE_JA_4A STNT1D {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1D {.D }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void StoreNonTemporal(Vector mask, ulong* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_f32](svbool_t pg, float32_t *base, svfloat32_t data) + /// STNT1W Zdata.S, Pg, [Xarray, Xindex, LSL #2] + /// STNT1W Zdata.S, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1W {.S }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A STNT1W {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1W {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1W {.S }, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void StoreNonTemporal(Vector mask, float* address, Vector data) => StoreNonTemporal(mask, address, data); + + /// + /// void svstnt1[_f64](svbool_t pg, float64_t *base, svfloat64_t data) + /// STNT1D Zdata.D, Pg, [Xarray, Xindex, LSL #3] + /// STNT1D Zdata.D, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1D {.D }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D); + /// IF_SVE_JA_4A STNT1D {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1D {.D }, , [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void StoreNonTemporal(Vector mask, double* address, Vector data) => StoreNonTemporal(mask, 
address, data);
+
+
+ /// Subtract : Subtract
+
+ /// <summary>
+ /// svint8_t svsub[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ ///   SUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ ///   MOVPRFX Zresult, Zop1; SUB Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// svint8_t svsub[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ ///   SUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ ///   SUBR Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+ ///   SUB Zresult.B, Zop1.B, Zop2.B
+ /// svint8_t svsub[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ ///   MOVPRFX Zresult.B, Pg/Z, Zop1.B; SUB Zresult.B, Pg/M, Zresult.B, Zop2.B
+ ///   MOVPRFX Zresult.B, Pg/Z, Zop2.B; SUBR Zresult.B, Pg/M, Zresult.B, Zop1.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AB_3A SUB <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V15, REG_P7, REG_V29, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_AT_3A SUB <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V19, REG_V7, REG_V13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_EC_1A SUB <Zdn>.<T>, <Zdn>.<T>, #<imm>{, <shift>}
+ /// theEmitter->emitIns_R_I(INS_sve_sub, EA_SCALABLE, REG_V3, 128, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<sbyte> Subtract(Vector<sbyte> left, Vector<sbyte> right) => Subtract(left, right);
+
+ /// <summary>
+ /// svint16_t svsub[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ ///   SUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ ///   MOVPRFX Zresult, Zop1; SUB Zresult.H, Pg/M, Zresult.H, Zop2.H
+ /// svint16_t svsub[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ ///   SUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ ///   SUBR Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+ ///   SUB Zresult.H, Zop1.H, Zop2.H
+ /// svint16_t svsub[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ ///   MOVPRFX Zresult.H, Pg/Z, Zop1.H; SUB Zresult.H, Pg/M, Zresult.H, Zop2.H
+ ///   MOVPRFX Zresult.H, Pg/Z, Zop2.H; SUBR Zresult.H, Pg/M, Zresult.H, Zop1.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AB_3A SUB <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V15, REG_P7, REG_V29, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_AT_3A SUB <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V19, REG_V7, REG_V13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_EC_1A SUB <Zdn>.<T>, <Zdn>.<T>, #<imm>{, <shift>}
+ /// theEmitter->emitIns_R_I(INS_sve_sub, EA_SCALABLE, REG_V3, 128, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<short> Subtract(Vector<short> left, Vector<short> right) => Subtract(left, right);
+
+ /// <summary>
+ /// svint32_t svsub[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ ///   SUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ ///   MOVPRFX Zresult, Zop1; SUB Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// svint32_t svsub[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ ///   SUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ ///   SUBR Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+ ///   SUB Zresult.S, Zop1.S, Zop2.S
+ /// svint32_t svsub[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ ///   MOVPRFX Zresult.S, Pg/Z, Zop1.S; SUB Zresult.S, Pg/M, Zresult.S, Zop2.S
+ ///   MOVPRFX Zresult.S, Pg/Z, Zop2.S; SUBR Zresult.S, Pg/M, Zresult.S, Zop1.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AB_3A SUB <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V15, REG_P7, REG_V29, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_AT_3A SUB <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
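+ ///
+ /// Managed usage sketch (hypothetical editorial aside; the codegenarm64test listing resumes below).
+ /// Assumes the Vector<int> overload defined after this summary:
+ ///   Vector<int> diff = Sve.Subtract(left, right); // per lane: diff[i] = left[i] - right[i], wrapping on overflow
+ ///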
+ /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V19, REG_V7, REG_V13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sub, EA_SCALABLE, REG_V3, 128, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svint64_t svsub[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// SUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svsub[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// SUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// SUBR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// SUB Zresult.D, Zop1.D, Zop2.D + /// svint64_t svsub[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; SUBR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AB_3A SUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V15, REG_P7, REG_V29, INS_OPTS_SCALABLE_H); + /// IF_SVE_AT_3A SUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V19, REG_V7, REG_V13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sub, EA_SCALABLE, REG_V3, 128, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svuint8_t svsub[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// SUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; SUB Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svsub[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// SUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// SUBR Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// SUB Zresult.B, Zop1.B, Zop2.B + /// svuint8_t svsub[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; SUB Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; SUBR Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AB_3A SUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V15, REG_P7, REG_V29, INS_OPTS_SCALABLE_H); + /// IF_SVE_AT_3A SUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V19, REG_V7, REG_V13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sub, EA_SCALABLE, REG_V3, 128, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svuint16_t svsub[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// SUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SUB Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svsub[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// SUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// SUBR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// SUB Zresult.H, Zop1.H, Zop2.H + /// svuint16_t svsub[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; SUB Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; SUBR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AB_3A SUB ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V15, REG_P7, REG_V29, INS_OPTS_SCALABLE_H); + /// IF_SVE_AT_3A SUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V19, REG_V7, REG_V13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sub, EA_SCALABLE, REG_V3, 128, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svuint32_t svsub[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// SUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SUB Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svsub[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// SUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// SUBR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// SUB Zresult.S, Zop1.S, Zop2.S + /// svuint32_t svsub[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; SUB Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; SUBR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AB_3A SUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V15, REG_P7, REG_V29, INS_OPTS_SCALABLE_H); + /// IF_SVE_AT_3A SUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V19, REG_V7, REG_V13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sub, EA_SCALABLE, REG_V3, 128, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svuint64_t svsub[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// SUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svsub[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// SUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// SUBR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// SUB Zresult.D, Zop1.D, Zop2.D + /// svuint64_t svsub[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; SUBR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AB_3A SUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V15, REG_P7, REG_V29, INS_OPTS_SCALABLE_H); + /// IF_SVE_AT_3A SUB ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sub, EA_SCALABLE, REG_V19, REG_V7, REG_V13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sub, EA_SCALABLE, REG_V3, 128, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svfloat32_t svsub[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FSUB Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svsub[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// FSUBR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// FSUB Zresult.S, Zop1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FSUB Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svsub[_f32]_z(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; FSUB Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; FSUBR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fsub, EA_SCALABLE, REG_V5, REG_P5, REG_V30, INS_OPTS_SCALABLE_S); + /// IF_SVE_HK_3A FSUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fsub, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_HM_2A FSUB ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fsub, EA_SCALABLE, REG_V7, REG_P2, 0.5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_F(INS_sve_fsub, EA_SCALABLE, REG_V7, REG_P2, 1.0, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + /// + /// svfloat64_t svsub[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FSUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svsub[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// FSUBR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// FSUB Zresult.D, Zop1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FSUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svsub[_f64]_z(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; FSUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; FSUBR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fsub, EA_SCALABLE, REG_V5, REG_P5, REG_V30, INS_OPTS_SCALABLE_S); + /// IF_SVE_HK_3A FSUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fsub, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_HM_2A FSUB ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fsub, EA_SCALABLE, REG_V7, REG_P2, 0.5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_F(INS_sve_fsub, EA_SCALABLE, REG_V7, REG_P2, 1.0, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + + + /// SubtractSaturate : Saturating subtract + + /// + /// svint8_t svqsub[_s8](svint8_t op1, svint8_t op2) + /// SQSUB Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_ET_3A SQSUB ., /M, ., . 
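+ ///
+ /// Managed usage sketch (hypothetical editorial aside; the codegenarm64test listing resumes below).
+ /// SQSUB saturates instead of wrapping, e.g. (sbyte)(-100) - 100 clamps to sbyte.MinValue (-128):
+ ///   Vector<sbyte> clamped = Sve.SubtractSaturate(left, right);
+ ///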
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V29, REG_P0, REG_V24, INS_OPTS_SCALABLE_H); + /// IF_SVE_AT_3A SQSUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V7, REG_V0, REG_V31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SQSUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sqsub, EA_SCALABLE, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svint16_t svqsub[_s16](svint16_t op1, svint16_t op2) + /// SQSUB Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_ET_3A SQSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V29, REG_P0, REG_V24, INS_OPTS_SCALABLE_H); + /// IF_SVE_AT_3A SQSUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V7, REG_V0, REG_V31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SQSUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sqsub, EA_SCALABLE, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svint32_t svqsub[_s32](svint32_t op1, svint32_t op2) + /// SQSUB Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_ET_3A SQSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V29, REG_P0, REG_V24, INS_OPTS_SCALABLE_H); + /// IF_SVE_AT_3A SQSUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V7, REG_V0, REG_V31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SQSUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sqsub, EA_SCALABLE, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svint64_t svqsub[_s64](svint64_t op1, svint64_t op2) + /// SQSUB Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_ET_3A SQSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V29, REG_P0, REG_V24, INS_OPTS_SCALABLE_H); + /// IF_SVE_AT_3A SQSUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V7, REG_V0, REG_V31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SQSUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sqsub, EA_SCALABLE, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svuint8_t svqsub[_u8](svuint8_t op1, svuint8_t op2) + /// UQSUB Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V1, REG_P4, REG_V28, INS_OPTS_SCALABLE_D); + /// IF_SVE_AT_3A UQSUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V31, REG_V31, REG_V31, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A UQSUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_uqsub, EA_SCALABLE, REG_V6, 255, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_SHIFT); + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svuint16_t svqsub[_u16](svuint16_t op1, svuint16_t op2) + /// UQSUB Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQSUB ., /M, ., . 
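+ ///
+ /// Managed usage sketch (hypothetical editorial aside; the codegenarm64test listing resumes below).
+ /// UQSUB clamps at zero, e.g. (ushort)5 - 9 yields 0 rather than wrapping to 65532:
+ ///   Vector<ushort> clamped = Sve.SubtractSaturate(left, right);
+ ///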
+ /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V1, REG_P4, REG_V28, INS_OPTS_SCALABLE_D); + /// IF_SVE_AT_3A UQSUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V31, REG_V31, REG_V31, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A UQSUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_uqsub, EA_SCALABLE, REG_V6, 255, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_SHIFT); + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svuint32_t svqsub[_u32](svuint32_t op1, svuint32_t op2) + /// UQSUB Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V1, REG_P4, REG_V28, INS_OPTS_SCALABLE_D); + /// IF_SVE_AT_3A UQSUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V31, REG_V31, REG_V31, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A UQSUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_uqsub, EA_SCALABLE, REG_V6, 255, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_SHIFT); + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svuint64_t svqsub[_u64](svuint64_t op1, svuint64_t op2) + /// UQSUB Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V1, REG_P4, REG_V28, INS_OPTS_SCALABLE_D); + /// IF_SVE_AT_3A UQSUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V31, REG_V31, REG_V31, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A UQSUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_uqsub, EA_SCALABLE, REG_V6, 255, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_SHIFT); + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + + /// TestAnyTrue : Test whether any active element is true + + /// + /// bool svptest_any(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask) => TestAnyTrue(leftMask, rightMask); + + /// + /// bool svptest_any(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask) => TestAnyTrue(leftMask, rightMask); + + /// + /// bool svptest_any(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask) => TestAnyTrue(leftMask, rightMask); + + /// + /// bool svptest_any(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask) => TestAnyTrue(leftMask, rightMask); + + /// + /// bool svptest_any(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// 
IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask) => TestAnyTrue(leftMask, rightMask); + + /// + /// bool svptest_any(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask) => TestAnyTrue(leftMask, rightMask); + + /// + /// bool svptest_any(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask) => TestAnyTrue(leftMask, rightMask); + + /// + /// bool svptest_any(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask) => TestAnyTrue(leftMask, rightMask); + + + /// TestFirstTrue : Test whether the first active element is true + + /// + /// bool svptest_first(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask) => TestFirstTrue(leftMask, rightMask); + + /// + /// bool svptest_first(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask) => TestFirstTrue(leftMask, rightMask); + + /// + /// bool svptest_first(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask) => TestFirstTrue(leftMask, rightMask); + + /// + /// bool svptest_first(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask) => TestFirstTrue(leftMask, rightMask); + + /// + /// bool svptest_first(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask) => TestFirstTrue(leftMask, rightMask); + + /// + /// bool svptest_first(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask) => TestFirstTrue(leftMask, rightMask); + + /// + /// bool svptest_first(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// 
IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask) => TestFirstTrue(leftMask, rightMask); + + /// + /// bool svptest_first(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask) => TestFirstTrue(leftMask, rightMask); + + + /// TestLastTrue : Test whether the last active element is true + + /// + /// bool svptest_last(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask) => TestLastTrue(leftMask, rightMask); + + /// + /// bool svptest_last(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask) => TestLastTrue(leftMask, rightMask); + + /// + /// bool svptest_last(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask) => TestLastTrue(leftMask, rightMask); + + /// + /// bool svptest_last(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask) => TestLastTrue(leftMask, rightMask); + + /// + /// bool svptest_last(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask) => TestLastTrue(leftMask, rightMask); + + /// + /// bool svptest_last(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask) => TestLastTrue(leftMask, rightMask); + + /// + /// bool svptest_last(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask) => TestLastTrue(leftMask, rightMask); + + /// + /// bool svptest_last(svbool_t pg, svbool_t op) + /// PTEST + /// + /// codegenarm64test: + /// IF_SVE_DI_2A PTEST , .B + /// theEmitter->emitIns_R_R(INS_sve_ptest, EA_SCALABLE, REG_P2, REG_P14, INS_OPTS_SCALABLE_B); + /// + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask) => TestLastTrue(leftMask, rightMask); + + + /// TransposeEven : Interleave even elements from two inputs + + /// + /// svint8_t svtrn1[_s8](svint8_t op1, 
svint8_t op2) + /// TRN1 Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector TransposeEven(Vector left, Vector right) => TransposeEven(left, right); + + /// + /// svint16_t svtrn1[_s16](svint16_t op1, svint16_t op2) + /// TRN1 Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector TransposeEven(Vector left, Vector right) => TransposeEven(left, right); + + /// + /// svint32_t svtrn1[_s32](svint32_t op1, svint32_t op2) + /// TRN1 Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector TransposeEven(Vector left, Vector right) => TransposeEven(left, right); + + /// + /// svint64_t svtrn1[_s64](svint64_t op1, svint64_t op2) + /// TRN1 Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . 
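+ ///
+ /// Managed usage sketch (hypothetical editorial aside; the codegenarm64test listing resumes below).
+ /// With four lanes per vector shown for illustration, left = { a0, a1, a2, a3 } and right = { b0, b1, b2, b3 }:
+ ///   Vector<long> evens = Sve.TransposeEven(left, right); // { a0, b0, a2, b2 }
+ ///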
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector TransposeEven(Vector left, Vector right) => TransposeEven(left, right); + + /// + /// svuint8_t svtrn1[_u8](svuint8_t op1, svuint8_t op2) + /// TRN1 Zresult.B, Zop1.B, Zop2.B + /// svbool_t svtrn1_b8(svbool_t op1, svbool_t op2) + /// TRN1 Presult.B, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector TransposeEven(Vector left, Vector right) => TransposeEven(left, right); + + /// + /// svuint16_t svtrn1[_u16](svuint16_t op1, svuint16_t op2) + /// TRN1 Zresult.H, Zop1.H, Zop2.H + /// svbool_t svtrn1_b16(svbool_t op1, svbool_t op2) + /// TRN1 Presult.H, Pop1.H, Pop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector TransposeEven(Vector left, Vector right) => TransposeEven(left, right); + + /// + /// svuint32_t svtrn1[_u32](svuint32_t op1, svuint32_t op2) + /// TRN1 Zresult.S, Zop1.S, Zop2.S + /// svbool_t svtrn1_b32(svbool_t op1, svbool_t op2) + /// TRN1 Presult.S, Pop1.S, Pop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector TransposeEven(Vector left, Vector right) => TransposeEven(left, right); + + /// + /// svuint64_t svtrn1[_u64](svuint64_t op1, svuint64_t op2) + /// TRN1 Zresult.D, Zop1.D, Zop2.D + /// svbool_t svtrn1_b64(svbool_t op1, svbool_t op2) + /// TRN1 Presult.D, Pop1.D, Pop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . 
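+ ///
+ /// Note (editorial, hedged): the summary above pairs svtrn1[_u64] with the predicate form svtrn1_b64,
+ /// so mask-typed Vector<ulong> inputs are expected to take the TRN1 predicate-register form listed below, e.g.:
+ ///   Vector<ulong> interleavedMasks = Sve.TransposeEven(maskA, maskB);
+ ///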
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector TransposeEven(Vector left, Vector right) => TransposeEven(left, right); + + /// + /// svfloat32_t svtrn1[_f32](svfloat32_t op1, svfloat32_t op2) + /// TRN1 Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector TransposeEven(Vector left, Vector right) => TransposeEven(left, right); + + /// + /// svfloat64_t svtrn1[_f64](svfloat64_t op1, svfloat64_t op2) + /// TRN1 Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector TransposeEven(Vector left, Vector right) => TransposeEven(left, right); + + + /// TransposeOdd : Interleave odd elements from two inputs + + /// + /// svint8_t svtrn2[_s8](svint8_t op1, svint8_t op2) + /// TRN2 Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector TransposeOdd(Vector left, Vector right) => TransposeOdd(left, right); + + /// + /// svint16_t svtrn2[_s16](svint16_t op1, svint16_t op2) + /// TRN2 Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector TransposeOdd(Vector left, Vector right) => TransposeOdd(left, right); + + /// + /// svint32_t svtrn2[_s32](svint32_t op1, svint32_t op2) + /// TRN2 Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector TransposeOdd(Vector left, Vector right) => TransposeOdd(left, right); + + /// + /// svint64_t svtrn2[_s64](svint64_t op1, svint64_t op2) + /// TRN2 Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector TransposeOdd(Vector left, Vector right) => TransposeOdd(left, right); + + /// + /// svuint8_t svtrn2[_u8](svuint8_t op1, svuint8_t op2) + /// TRN2 Zresult.B, Zop1.B, Zop2.B + /// svbool_t svtrn2_b8(svbool_t op1, svbool_t op2) + /// TRN2 Presult.B, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . 
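+ ///
+ /// Managed usage sketch (hypothetical editorial aside; the codegenarm64test listing resumes below).
+ /// TRN2 is the odd-lane counterpart of TRN1; with left = { a0, a1, a2, a3 } and right = { b0, b1, b2, b3 }:
+ ///   Vector<byte> odds = Sve.TransposeOdd(left, right); // { a1, b1, a3, b3 }
+ ///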
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector TransposeOdd(Vector left, Vector right) => TransposeOdd(left, right); + + /// + /// svuint16_t svtrn2[_u16](svuint16_t op1, svuint16_t op2) + /// TRN2 Zresult.H, Zop1.H, Zop2.H + /// svbool_t svtrn2_b16(svbool_t op1, svbool_t op2) + /// TRN2 Presult.H, Pop1.H, Pop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector TransposeOdd(Vector left, Vector right) => TransposeOdd(left, right); + + /// + /// svuint32_t svtrn2[_u32](svuint32_t op1, svuint32_t op2) + /// TRN2 Zresult.S, Zop1.S, Zop2.S + /// svbool_t svtrn2_b32(svbool_t op1, svbool_t op2) + /// TRN2 Presult.S, Pop1.S, Pop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector TransposeOdd(Vector left, Vector right) => TransposeOdd(left, right); + + /// + /// svuint64_t svtrn2[_u64](svuint64_t op1, svuint64_t op2) + /// TRN2 Zresult.D, Zop1.D, Zop2.D + /// svbool_t svtrn2_b64(svbool_t op1, svbool_t op2) + /// TRN2 Presult.D, Pop1.D, Pop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector TransposeOdd(Vector left, Vector right) => TransposeOdd(left, right); + + /// + /// svfloat32_t svtrn2[_f32](svfloat32_t op1, svfloat32_t op2) + /// TRN2 Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector TransposeOdd(Vector left, Vector right) => TransposeOdd(left, right); + + /// + /// svfloat64_t svtrn2[_f64](svfloat64_t op1, svfloat64_t op2) + /// TRN2 Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector TransposeOdd(Vector left, Vector right) => TransposeOdd(left, right); + + + /// TrigonometricMultiplyAddCoefficient : Trigonometric multiply-add coefficient + + /// + /// svfloat32_t svtmad[_f32](svfloat32_t op1, svfloat32_t op2, uint64_t imm3) + /// FTMAD Ztied1.S, Ztied1.S, Zop2.S, #imm3 + /// MOVPRFX Zresult, Zop1; FTMAD Zresult.S, Zresult.S, Zop2.S, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_HN_2A FTMAD ., ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_ftmad, EA_SCALABLE, REG_V0, REG_V2, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_ftmad, EA_SCALABLE, REG_V3, REG_V5, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_ftmad, EA_SCALABLE, REG_V4, REG_V2, 7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector TrigonometricMultiplyAddCoefficient(Vector left, Vector right, [ConstantExpected] byte control) => TrigonometricMultiplyAddCoefficient(left, right, control); + + /// + /// svfloat64_t svtmad[_f64](svfloat64_t op1, svfloat64_t op2, uint64_t imm3) + /// FTMAD Ztied1.D, Ztied1.D, Zop2.D, #imm3 + /// MOVPRFX Zresult, Zop1; FTMAD Zresult.D, Zresult.D, Zop2.D, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_HN_2A FTMAD ., ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_ftmad, EA_SCALABLE, REG_V0, REG_V2, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_ftmad, EA_SCALABLE, REG_V3, REG_V5, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_ftmad, EA_SCALABLE, REG_V4, REG_V2, 7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector TrigonometricMultiplyAddCoefficient(Vector left, Vector right, [ConstantExpected] byte control) => TrigonometricMultiplyAddCoefficient(left, right, control); + + + /// TrigonometricSelectCoefficient : Trigonometric select coefficient + + /// + /// svfloat32_t svtssel[_f32](svfloat32_t op1, svuint32_t op2) + /// FTSSEL Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_BK_3A FTSSEL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_ftssel, EA_SCALABLE, REG_V17, REG_V16, REG_V15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector TrigonometricSelectCoefficient(Vector value, Vector selector) => TrigonometricSelectCoefficient(value, selector); + + /// + /// svfloat64_t svtssel[_f64](svfloat64_t op1, svuint64_t op2) + /// FTSSEL Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BK_3A FTSSEL ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ftssel, EA_SCALABLE, REG_V17, REG_V16, REG_V15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector TrigonometricSelectCoefficient(Vector value, Vector selector) => TrigonometricSelectCoefficient(value, selector); + + + /// TrigonometricStartingValue : Trigonometric starting value + + /// + /// svfloat32_t svtsmul[_f32](svfloat32_t op1, svuint32_t op2) + /// FTSMUL Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_HK_3A FTSMUL ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ftsmul, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector TrigonometricStartingValue(Vector value, Vector sign) => TrigonometricStartingValue(value, sign); + + /// + /// svfloat64_t svtsmul[_f64](svfloat64_t op1, svuint64_t op2) + /// FTSMUL Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_HK_3A FTSMUL ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ftsmul, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector TrigonometricStartingValue(Vector value, Vector sign) => TrigonometricStartingValue(value, sign); + + + /// UnzipEven : Concatenate even elements from two inputs + + /// + /// svint8_t svuzp1[_s8](svint8_t op1, svint8_t op2) + /// UZP1 Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) => UnzipEven(left, right); + + /// + /// svint16_t svuzp1[_s16](svint16_t op1, svint16_t op2) + /// UZP1 Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) => UnzipEven(left, right); + + /// + /// svint32_t svuzp1[_s32](svint32_t op1, svint32_t op2) + /// UZP1 Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) => UnzipEven(left, right); + + /// + /// svint64_t svuzp1[_s64](svint64_t op1, svint64_t op2) + /// UZP1 Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) => UnzipEven(left, right); + + /// + /// svuint8_t svuzp1[_u8](svuint8_t op1, svuint8_t op2) + /// UZP1 Zresult.B, Zop1.B, Zop2.B + /// svbool_t svuzp1_b8(svbool_t op1, svbool_t op2) + /// UZP1 Presult.B, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) => UnzipEven(left, right); + + /// + /// svuint16_t svuzp1[_u16](svuint16_t op1, svuint16_t op2) + /// UZP1 Zresult.H, Zop1.H, Zop2.H + /// svbool_t svuzp1_b16(svbool_t op1, svbool_t op2) + /// UZP1 Presult.H, Pop1.H, Pop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) => UnzipEven(left, right); + + /// + /// svuint32_t svuzp1[_u32](svuint32_t op1, svuint32_t op2) + /// UZP1 Zresult.S, Zop1.S, Zop2.S + /// svbool_t svuzp1_b32(svbool_t op1, svbool_t op2) + /// UZP1 Presult.S, Pop1.S, Pop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) => UnzipEven(left, right); + + /// + /// svuint64_t svuzp1[_u64](svuint64_t op1, svuint64_t op2) + /// UZP1 Zresult.D, Zop1.D, Zop2.D + /// svbool_t svuzp1_b64(svbool_t op1, svbool_t op2) + /// UZP1 Presult.D, Pop1.D, Pop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) => UnzipEven(left, right); + + /// + /// svfloat32_t svuzp1[_f32](svfloat32_t op1, svfloat32_t op2) + /// UZP1 Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) => UnzipEven(left, right); + + /// + /// svfloat64_t svuzp1[_f64](svfloat64_t op1, svfloat64_t op2) + /// UZP1 Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) => UnzipEven(left, right); + + + /// UnzipOdd : Concatenate odd elements from two inputs + + /// + /// svint8_t svuzp2[_s8](svint8_t op1, svint8_t op2) + /// UZP2 Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) => UnzipOdd(left, right); + + /// + /// svint16_t svuzp2[_s16](svint16_t op1, svint16_t op2) + /// UZP2 Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) => UnzipOdd(left, right); + + /// + /// svint32_t svuzp2[_s32](svint32_t op1, svint32_t op2) + /// UZP2 Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) => UnzipOdd(left, right); + + /// + /// svint64_t svuzp2[_s64](svint64_t op1, svint64_t op2) + /// UZP2 Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) => UnzipOdd(left, right); + + /// + /// svuint8_t svuzp2[_u8](svuint8_t op1, svuint8_t op2) + /// UZP2 Zresult.B, Zop1.B, Zop2.B + /// svbool_t svuzp2_b8(svbool_t op1, svbool_t op2) + /// UZP2 Presult.B, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) => UnzipOdd(left, right); + + /// + /// svuint16_t svuzp2[_u16](svuint16_t op1, svuint16_t op2) + /// UZP2 Zresult.H, Zop1.H, Zop2.H + /// svbool_t svuzp2_b16(svbool_t op1, svbool_t op2) + /// UZP2 Presult.H, Pop1.H, Pop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) => UnzipOdd(left, right); + + /// + /// svuint32_t svuzp2[_u32](svuint32_t op1, svuint32_t op2) + /// UZP2 Zresult.S, Zop1.S, Zop2.S + /// svbool_t svuzp2_b32(svbool_t op1, svbool_t op2) + /// UZP2 Presult.S, Pop1.S, Pop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) => UnzipOdd(left, right); + + /// + /// svuint64_t svuzp2[_u64](svuint64_t op1, svuint64_t op2) + /// UZP2 Zresult.D, Zop1.D, Zop2.D + /// svbool_t svuzp2_b64(svbool_t op1, svbool_t op2) + /// UZP2 Presult.D, Pop1.D, Pop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) => UnzipOdd(left, right); + + /// + /// svfloat32_t svuzp2[_f32](svfloat32_t op1, svfloat32_t op2) + /// UZP2 Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) => UnzipOdd(left, right); + + /// + /// svfloat64_t svuzp2[_f64](svfloat64_t op1, svfloat64_t op2) + /// UZP2 Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) => UnzipOdd(left, right); + + + /// VectorTableLookup : Table lookup in single-vector table + + /// + /// svint8_t svtbl[_s8](svint8_t data, svuint8_t indices) + /// TBL Zresult.B, Zdata.B, Zindices.B + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) => VectorTableLookup(data, indices); + + /// + /// svint16_t svtbl[_s16](svint16_t data, svuint16_t indices) + /// TBL Zresult.H, Zdata.H, Zindices.H + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) => VectorTableLookup(data, indices); + + /// + /// svint32_t svtbl[_s32](svint32_t data, svuint32_t indices) + /// TBL Zresult.S, Zdata.S, Zindices.S + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) => VectorTableLookup(data, indices); + + /// + /// svint64_t svtbl[_s64](svint64_t data, svuint64_t indices) + /// TBL Zresult.D, Zdata.D, Zindices.D + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) => VectorTableLookup(data, indices); + + /// + /// svuint8_t svtbl[_u8](svuint8_t data, svuint8_t indices) + /// TBL Zresult.B, Zdata.B, Zindices.B + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) => VectorTableLookup(data, indices); + + /// + /// svuint16_t svtbl[_u16](svuint16_t data, svuint16_t indices) + /// TBL Zresult.H, Zdata.H, Zindices.H + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) => VectorTableLookup(data, indices); + + /// + /// svuint32_t svtbl[_u32](svuint32_t data, svuint32_t indices) + /// TBL Zresult.S, Zdata.S, Zindices.S + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) => VectorTableLookup(data, indices); + + /// + /// svuint64_t svtbl[_u64](svuint64_t data, svuint64_t indices) + /// TBL Zresult.D, Zdata.D, Zindices.D + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) => VectorTableLookup(data, indices); + + /// + /// svfloat32_t svtbl[_f32](svfloat32_t data, svuint32_t indices) + /// TBL Zresult.S, Zdata.S, Zindices.S + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) => VectorTableLookup(data, indices); + + /// + /// svfloat64_t svtbl[_f64](svfloat64_t data, svuint64_t indices) + /// TBL Zresult.D, Zdata.D, Zindices.D + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) => VectorTableLookup(data, indices); + + + /// Xor : Bitwise exclusive OR + + /// + /// svint8_t sveor[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// EOR Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; EOR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t sveor[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// EOR Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// EOR Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// EOR Zresult.D, Zop1.D, Zop2.D + /// svint8_t sveor[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; EOR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; EOR Zresult.B, Pg/M, Zresult.B, Zop1.B + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// EOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A EOR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V14, REG_P5, REG_V16, INS_OPTS_SCALABLE_S); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_eor, EA_SCALABLE, REG_P8, REG_P15, REG_P11, REG_P7, INS_OPTS_SCALABLE_B); /* EOR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A EOR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Xor(Vector left, Vector right) => Xor(left, right); + + /// + /// svint16_t sveor[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// EOR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; EOR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t sveor[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// EOR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// EOR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// EOR Zresult.D, Zop1.D, Zop2.D + /// svint16_t sveor[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; EOR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; EOR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// EOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A EOR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V14, REG_P5, REG_V16, INS_OPTS_SCALABLE_S); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_eor, EA_SCALABLE, REG_P8, REG_P15, REG_P11, REG_P7, INS_OPTS_SCALABLE_B); /* EOR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A EOR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Xor(Vector left, Vector right) => Xor(left, right); + + /// + /// svint32_t sveor[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// EOR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; EOR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t sveor[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// EOR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// EOR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// EOR Zresult.D, Zop1.D, Zop2.D + /// svint32_t sveor[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; EOR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; EOR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// EOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A EOR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V14, REG_P5, REG_V16, INS_OPTS_SCALABLE_S); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_eor, EA_SCALABLE, REG_P8, REG_P15, REG_P11, REG_P7, INS_OPTS_SCALABLE_B); /* EOR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A EOR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Xor(Vector left, Vector right) => Xor(left, right); + + /// + /// svint64_t sveor[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// EOR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; EOR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t sveor[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// EOR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// EOR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// EOR Zresult.D, Zop1.D, Zop2.D + /// svint64_t sveor[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; EOR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; EOR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// EOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A EOR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V14, REG_P5, REG_V16, INS_OPTS_SCALABLE_S); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_eor, EA_SCALABLE, REG_P8, REG_P15, REG_P11, REG_P7, INS_OPTS_SCALABLE_B); /* EOR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A EOR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Xor(Vector left, Vector right) => Xor(left, right); + + /// + /// svuint8_t sveor[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// EOR Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; EOR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t sveor[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// EOR Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// EOR Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// EOR Zresult.D, Zop1.D, Zop2.D + /// svuint8_t sveor[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; EOR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; EOR Zresult.B, Pg/M, Zresult.B, Zop1.B + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// EOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A EOR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V14, REG_P5, REG_V16, INS_OPTS_SCALABLE_S); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_eor, EA_SCALABLE, REG_P8, REG_P15, REG_P11, REG_P7, INS_OPTS_SCALABLE_B); /* EOR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A EOR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Xor(Vector left, Vector right) => Xor(left, right); + + /// + /// svuint16_t sveor[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// EOR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; EOR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t sveor[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// EOR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// EOR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// EOR Zresult.D, Zop1.D, Zop2.D + /// svuint16_t sveor[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; EOR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; EOR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// EOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A EOR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V14, REG_P5, REG_V16, INS_OPTS_SCALABLE_S); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_eor, EA_SCALABLE, REG_P8, REG_P15, REG_P11, REG_P7, INS_OPTS_SCALABLE_B); /* EOR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A EOR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Xor(Vector left, Vector right) => Xor(left, right); + + /// + /// svuint32_t sveor[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// EOR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; EOR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t sveor[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// EOR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// EOR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// EOR Zresult.D, Zop1.D, Zop2.D + /// svuint32_t sveor[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; EOR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; EOR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// EOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A EOR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V14, REG_P5, REG_V16, INS_OPTS_SCALABLE_S); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_eor, EA_SCALABLE, REG_P8, REG_P15, REG_P11, REG_P7, INS_OPTS_SCALABLE_B); /* EOR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A EOR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Xor(Vector left, Vector right) => Xor(left, right); + + /// + /// svuint64_t sveor[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// EOR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; EOR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t sveor[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// EOR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// EOR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// EOR Zresult.D, Zop1.D, Zop2.D + /// svuint64_t sveor[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; EOR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; EOR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// svbool_t sveor[_b]_z(svbool_t pg, svbool_t op1, svbool_t op2) + /// EOR Presult.B, Pg/Z, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_AA_3A EOR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V14, REG_P5, REG_V16, INS_OPTS_SCALABLE_S); + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_eor, EA_SCALABLE, REG_P8, REG_P15, REG_P11, REG_P7, INS_OPTS_SCALABLE_B); /* EOR .B, /Z, .B, .B */ + /// IF_SVE_AU_3A EOR .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Xor(Vector left, Vector right) => Xor(left, right); + + + /// XorAcross : Bitwise exclusive OR reduction to scalar + + /// + /// int8_t sveorv[_s8](svbool_t pg, svint8_t op) + /// EORV Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AF_3A EORV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_eorv, EA_2BYTE, REG_V1, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector XorAcross(Vector value) => XorAcross(value); + + /// + /// int16_t sveorv[_s16](svbool_t pg, svint16_t op) + /// EORV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AF_3A EORV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_eorv, EA_2BYTE, REG_V1, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector XorAcross(Vector value) => XorAcross(value); + + /// + /// int32_t sveorv[_s32](svbool_t pg, svint32_t op) + /// EORV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AF_3A EORV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_eorv, EA_2BYTE, REG_V1, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector XorAcross(Vector value) => XorAcross(value); + + /// + /// int64_t sveorv[_s64](svbool_t pg, svint64_t op) + /// EORV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AF_3A EORV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_eorv, EA_2BYTE, REG_V1, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector XorAcross(Vector value) => XorAcross(value); + + /// + /// uint8_t sveorv[_u8](svbool_t pg, svuint8_t op) + /// EORV Bresult, Pg, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_AF_3A EORV , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_eorv, EA_2BYTE, REG_V1, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector XorAcross(Vector value) => XorAcross(value); + + /// + /// uint16_t sveorv[_u16](svbool_t pg, svuint16_t op) + /// EORV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AF_3A EORV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_eorv, EA_2BYTE, REG_V1, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector XorAcross(Vector value) => XorAcross(value); + + /// + /// uint32_t sveorv[_u32](svbool_t pg, svuint32_t op) + /// EORV Sresult, Pg, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AF_3A EORV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_eorv, EA_2BYTE, REG_V1, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector XorAcross(Vector value) => XorAcross(value); + + /// + /// uint64_t sveorv[_u64](svbool_t pg, svuint64_t op) + /// EORV Dresult, Pg, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AF_3A EORV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_eorv, EA_2BYTE, REG_V1, REG_P1, REG_V1, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector XorAcross(Vector value) => XorAcross(value); + + + /// ZeroExtend16 : Zero-extend the low 16 bits + + /// + /// svuint32_t svexth[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// UXTH Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; UXTH Zresult.S, Pg/M, Zop.S + /// svuint32_t svexth[_u32]_x(svbool_t pg, svuint32_t op) + /// UXTH Ztied.S, Pg/M, Ztied.S + /// AND Ztied.S, Ztied.S, #65535 + /// svuint32_t svexth[_u32]_z(svbool_t pg, svuint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; UXTH Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A UXTH ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_uxth, EA_SCALABLE, REG_V18, REG_P5, REG_V13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_uxth, EA_SCALABLE, REG_V18, REG_P5, REG_V13, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ZeroExtend16(Vector value) => ZeroExtend16(value); + + /// + /// svuint64_t svexth[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// UXTH Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; UXTH Zresult.D, Pg/M, Zop.D + /// svuint64_t svexth[_u64]_x(svbool_t pg, svuint64_t op) + /// UXTH Ztied.D, Pg/M, Ztied.D + /// AND Ztied.D, Ztied.D, #65535 + /// svuint64_t svexth[_u64]_z(svbool_t pg, svuint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; UXTH Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A UXTH ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uxth, EA_SCALABLE, REG_V18, REG_P5, REG_V13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_uxth, EA_SCALABLE, REG_V18, REG_P5, REG_V13, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ZeroExtend16(Vector value) => ZeroExtend16(value); + + + /// ZeroExtend32 : Zero-extend the low 32 bits + + /// + /// svuint64_t svextw[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// UXTW Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; UXTW Zresult.D, Pg/M, Zop.D + /// svuint64_t svextw[_u64]_x(svbool_t pg, svuint64_t op) + /// UXTW Ztied.D, Pg/M, Ztied.D + /// AND Ztied.D, Ztied.D, #4294967295 + /// svuint64_t svextw[_u64]_z(svbool_t pg, svuint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; UXTW Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A UXTW .D, /M, .D + /// theEmitter->emitIns_R_R_R(INS_sve_uxtw, EA_SCALABLE, REG_V17, REG_P6, REG_V14, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ZeroExtend32(Vector value) => ZeroExtend32(value); + + + /// ZeroExtend8 : Zero-extend the low 8 bits + + /// + /// svuint16_t svextb[_u16]_m(svuint16_t inactive, svbool_t pg, svuint16_t op) + /// UXTB Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; UXTB Zresult.H, Pg/M, Zop.H + /// svuint16_t svextb[_u16]_x(svbool_t pg, svuint16_t op) + /// UXTB Ztied.H, Pg/M, Ztied.H + /// AND Ztied.H, Ztied.H, #255 + /// svuint16_t svextb[_u16]_z(svbool_t pg, svuint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; UXTB Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A UXTB ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_uxtb, EA_SCALABLE, REG_V19, REG_P4, REG_V12, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_uxtb, EA_SCALABLE, REG_V19, REG_P4, REG_V12, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_uxtb, EA_SCALABLE, REG_V19, REG_P4, REG_V12, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ZeroExtend8(Vector value) => ZeroExtend8(value); + + /// + /// svuint32_t svextb[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// UXTB Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; UXTB Zresult.S, Pg/M, Zop.S + /// svuint32_t svextb[_u32]_x(svbool_t pg, svuint32_t op) + /// UXTB Ztied.S, Pg/M, Ztied.S + /// AND Ztied.S, Ztied.S, #255 + /// svuint32_t svextb[_u32]_z(svbool_t pg, svuint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; UXTB Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A UXTB ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uxtb, EA_SCALABLE, REG_V19, REG_P4, REG_V12, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_uxtb, EA_SCALABLE, REG_V19, REG_P4, REG_V12, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_uxtb, EA_SCALABLE, REG_V19, REG_P4, REG_V12, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ZeroExtend8(Vector value) => ZeroExtend8(value); + + /// + /// svuint64_t svextb[_u64]_m(svuint64_t inactive, svbool_t pg, svuint64_t op) + /// UXTB Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; UXTB Zresult.D, Pg/M, Zop.D + /// svuint64_t svextb[_u64]_x(svbool_t pg, svuint64_t op) + /// UXTB Ztied.D, Pg/M, Ztied.D + /// AND Ztied.D, Ztied.D, #255 + /// svuint64_t svextb[_u64]_z(svbool_t pg, svuint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; UXTB Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_AQ_3A UXTB ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_uxtb, EA_SCALABLE, REG_V19, REG_P4, REG_V12, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_uxtb, EA_SCALABLE, REG_V19, REG_P4, REG_V12, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_uxtb, EA_SCALABLE, REG_V19, REG_P4, REG_V12, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ZeroExtend8(Vector value) => ZeroExtend8(value); + + + /// ZeroExtendWideningLower : Unpack and extend low half + + /// + /// svuint16_t svunpklo[_u16](svuint8_t op) + /// UUNPKLO Zresult.H, Zop.B + /// svbool_t svunpklo[_b](svbool_t op) + /// PUNPKLO Presult.H, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_CH_2A UUNPKLO ., . + /// theEmitter->emitIns_R_R(INS_sve_uunpklo, EA_SCALABLE, REG_V8, REG_V6, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ZeroExtendWideningLower(Vector value) => ZeroExtendWideningLower(value); + + /// + /// svuint32_t svunpklo[_u32](svuint16_t op) + /// UUNPKLO Zresult.S, Zop.H + /// svbool_t svunpklo[_b](svbool_t op) + /// PUNPKLO Presult.H, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_CH_2A UUNPKLO ., . + /// theEmitter->emitIns_R_R(INS_sve_uunpklo, EA_SCALABLE, REG_V8, REG_V6, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ZeroExtendWideningLower(Vector value) => ZeroExtendWideningLower(value); + + /// + /// svuint64_t svunpklo[_u64](svuint32_t op) + /// UUNPKLO Zresult.D, Zop.S + /// svbool_t svunpklo[_b](svbool_t op) + /// PUNPKLO Presult.H, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_CH_2A UUNPKLO ., . + /// theEmitter->emitIns_R_R(INS_sve_uunpklo, EA_SCALABLE, REG_V8, REG_V6, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ZeroExtendWideningLower(Vector value) => ZeroExtendWideningLower(value); + + + /// ZeroExtendWideningUpper : Unpack and extend high half + + /// + /// svuint16_t svunpkhi[_u16](svuint8_t op) + /// UUNPKHI Zresult.H, Zop.B + /// svbool_t svunpkhi[_b](svbool_t op) + /// PUNPKHI Presult.H, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_CH_2A UUNPKHI ., . + /// theEmitter->emitIns_R_R(INS_sve_uunpkhi, EA_SCALABLE, REG_V5, REG_V1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ZeroExtendWideningUpper(Vector value) => ZeroExtendWideningUpper(value); + + /// + /// svuint32_t svunpkhi[_u32](svuint16_t op) + /// UUNPKHI Zresult.S, Zop.H + /// svbool_t svunpkhi[_b](svbool_t op) + /// PUNPKHI Presult.H, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_CH_2A UUNPKHI ., . 
+ /// theEmitter->emitIns_R_R(INS_sve_uunpkhi, EA_SCALABLE, REG_V5, REG_V1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ZeroExtendWideningUpper(Vector value) => ZeroExtendWideningUpper(value); + + /// + /// svuint64_t svunpkhi[_u64](svuint32_t op) + /// UUNPKHI Zresult.D, Zop.S + /// svbool_t svunpkhi[_b](svbool_t op) + /// PUNPKHI Presult.H, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_CH_2A UUNPKHI ., . + /// theEmitter->emitIns_R_R(INS_sve_uunpkhi, EA_SCALABLE, REG_V5, REG_V1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ZeroExtendWideningUpper(Vector value) => ZeroExtendWideningUpper(value); + + + /// ZipHigh : Interleave elements from high halves of two inputs + + /// + /// svint8_t svzip2[_s8](svint8_t op1, svint8_t op2) + /// ZIP2 Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ZipHigh(Vector left, Vector right) => ZipHigh(left, right); + + /// + /// svint16_t svzip2[_s16](svint16_t op1, svint16_t op2) + /// ZIP2 Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ZipHigh(Vector left, Vector right) => ZipHigh(left, right); + + /// + /// svint32_t svzip2[_s32](svint32_t op1, svint32_t op2) + /// ZIP2 Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ZipHigh(Vector left, Vector right) => ZipHigh(left, right); + + /// + /// svint64_t svzip2[_s64](svint64_t op1, svint64_t op2) + /// ZIP2 Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ZipHigh(Vector left, Vector right) => ZipHigh(left, right); + + /// + /// svuint8_t svzip2[_u8](svuint8_t op1, svuint8_t op2) + /// ZIP2 Zresult.B, Zop1.B, Zop2.B + /// svbool_t svzip2_b8(svbool_t op1, svbool_t op2) + /// ZIP2 Presult.B, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ZipHigh(Vector left, Vector right) => ZipHigh(left, right); + + /// + /// svuint16_t svzip2[_u16](svuint16_t op1, svuint16_t op2) + /// ZIP2 Zresult.H, Zop1.H, Zop2.H + /// svbool_t svzip2_b16(svbool_t op1, svbool_t op2) + /// ZIP2 Presult.H, Pop1.H, Pop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ZipHigh(Vector left, Vector right) => ZipHigh(left, right); + + /// + /// svuint32_t svzip2[_u32](svuint32_t op1, svuint32_t op2) + /// ZIP2 Zresult.S, Zop1.S, Zop2.S + /// svbool_t svzip2_b32(svbool_t op1, svbool_t op2) + /// ZIP2 Presult.S, Pop1.S, Pop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . 
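+ ///
+ /// Editor's note (illustrative sketch, not generator output): a scalar model of the
+ /// ZIP2 interleave documented here, with hypothetical array stand-ins for Vector lanes.
+ ///
+ ///   static int[] ZipHighScalar(int[] a, int[] b)   // ZIP2
+ ///   {
+ ///       int half = a.Length / 2;
+ ///       var dst = new int[a.Length];
+ ///       for (int i = 0; i < half; i++)
+ ///       {
+ ///           dst[2 * i]     = a[half + i];   // even lanes: op1's high half
+ ///           dst[2 * i + 1] = b[half + i];   // odd lanes: op2's high half
+ ///       }
+ ///       return dst;
+ ///   }
+ ///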
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ZipHigh(Vector left, Vector right) => ZipHigh(left, right); + + /// + /// svuint64_t svzip2[_u64](svuint64_t op1, svuint64_t op2) + /// ZIP2 Zresult.D, Zop1.D, Zop2.D + /// svbool_t svzip2_b64(svbool_t op1, svbool_t op2) + /// ZIP2 Presult.D, Pop1.D, Pop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ZipHigh(Vector left, Vector right) => ZipHigh(left, right); + + /// + /// svfloat32_t svzip2[_f32](svfloat32_t op1, svfloat32_t op2) + /// ZIP2 Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ZipHigh(Vector left, Vector right) => ZipHigh(left, right); + + /// + /// svfloat64_t svzip2[_f64](svfloat64_t op1, svfloat64_t op2) + /// ZIP2 Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ZipHigh(Vector left, Vector right) => ZipHigh(left, right); + + + /// ZipLow : Interleave elements from low halves of two inputs + + /// + /// svint8_t svzip1[_s8](svint8_t op1, svint8_t op2) + /// ZIP1 Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector ZipLow(Vector left, Vector right) => ZipLow(left, right); + + /// + /// svint16_t svzip1[_s16](svint16_t op1, svint16_t op2) + /// ZIP1 Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector ZipLow(Vector left, Vector right) => ZipLow(left, right); + + /// + /// svint32_t svzip1[_s32](svint32_t op1, svint32_t op2) + /// ZIP1 Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector ZipLow(Vector left, Vector right) => ZipLow(left, right); + + /// + /// svint64_t svzip1[_s64](svint64_t op1, svint64_t op2) + /// ZIP1 Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector ZipLow(Vector left, Vector right) => ZipLow(left, right); + + /// + /// svuint8_t svzip1[_u8](svuint8_t op1, svuint8_t op2) + /// ZIP1 Zresult.B, Zop1.B, Zop2.B + /// svbool_t svzip1_b8(svbool_t op1, svbool_t op2) + /// ZIP1 Presult.B, Pop1.B, Pop2.B + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector ZipLow(Vector left, Vector right) => ZipLow(left, right); + + /// + /// svuint16_t svzip1[_u16](svuint16_t op1, svuint16_t op2) + /// ZIP1 Zresult.H, Zop1.H, Zop2.H + /// svbool_t svzip1_b16(svbool_t op1, svbool_t op2) + /// ZIP1 Presult.H, Pop1.H, Pop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector ZipLow(Vector left, Vector right) => ZipLow(left, right); + + /// + /// svuint32_t svzip1[_u32](svuint32_t op1, svuint32_t op2) + /// ZIP1 Zresult.S, Zop1.S, Zop2.S + /// svbool_t svzip1_b32(svbool_t op1, svbool_t op2) + /// ZIP1 Presult.S, Pop1.S, Pop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector ZipLow(Vector left, Vector right) => ZipLow(left, right); + + /// + /// svuint64_t svzip1[_u64](svuint64_t op1, svuint64_t op2) + /// ZIP1 Zresult.D, Zop1.D, Zop2.D + /// svbool_t svzip1_b64(svbool_t op1, svbool_t op2) + /// ZIP1 Presult.D, Pop1.D, Pop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . 
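+ ///
+ /// Editor's note (illustrative, not generator output): ZIP1 is the same interleave
+ /// as the ZipHigh sketch earlier, drawn from the low halves instead:
+ ///
+ ///   // dst[2 * i] = a[i]; dst[2 * i + 1] = b[i];   for i in [0, a.Length / 2)
+ ///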
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector ZipLow(Vector left, Vector right) => ZipLow(left, right); + + /// + /// svfloat32_t svzip1[_f32](svfloat32_t op1, svfloat32_t op2) + /// ZIP1 Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector ZipLow(Vector left, Vector right) => ZipLow(left, right); + + /// + /// svfloat64_t svzip1[_f64](svfloat64_t op1, svfloat64_t op2) + /// ZIP1 Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector ZipLow(Vector left, Vector right) => ZipLow(left, right); + + } +} + diff --git a/sve_api/out_helper_api/Sve2.cs b/sve_api/out_helper_api/Sve2.cs new file mode 100644 index 0000000000000..2a273bcc35157 --- /dev/null +++ b/sve_api/out_helper_api/Sve2.cs @@ -0,0 +1,14583 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class Sve2 : AdvSimd + { + internal Sve2() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// AbsoluteDifferenceAdd : Absolute difference and accumulate + + /// + /// svint8_t svaba[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// SABA Ztied1.B, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; SABA Zresult.B, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_FW_3A SABA ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_saba, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_saba, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + /// + /// svint16_t svaba[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// SABA Ztied1.H, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; SABA Zresult.H, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_FW_3A SABA ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_saba, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_saba, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + /// + /// svint32_t svaba[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// SABA Ztied1.S, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; SABA Zresult.S, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_FW_3A SABA ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_saba, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_saba, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + /// + /// svint64_t svaba[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// SABA Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; SABA Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_FW_3A SABA ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_saba, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_saba, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + /// + /// svuint8_t svaba[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// UABA Ztied1.B, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; UABA Zresult.B, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_FW_3A UABA ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uaba, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_uaba, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + /// + /// svuint16_t svaba[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// UABA Ztied1.H, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; UABA Zresult.H, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_FW_3A UABA ., ., . 
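+ ///
+ /// Editor's note (illustrative sketch, not generator output): a scalar model of the
+ /// SABA/UABA semantics above, assuming using System for Math.Abs; the helper and its
+ /// array stand-ins for Vector lanes are hypothetical.
+ ///
+ ///   static byte[] AbsoluteDifferenceAddScalar(byte[] addend, byte[] left, byte[] right)
+ ///   {
+ ///       var dst = new byte[addend.Length];
+ ///       for (int i = 0; i < dst.Length; i++)
+ ///           dst[i] = (byte)(addend[i] + Math.Abs(left[i] - right[i]));   // wraps mod 256
+ ///       return dst;
+ ///   }
+ ///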
+ /// theEmitter->emitIns_R_R_R(INS_sve_uaba, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_uaba, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + /// + /// svuint32_t svaba[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// UABA Ztied1.S, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; UABA Zresult.S, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_FW_3A UABA ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uaba, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_uaba, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + /// + /// svuint64_t svaba[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// UABA Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; UABA Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_FW_3A UABA ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uaba, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_uaba, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAdd(addend, left, right); + + + /// AbsoluteDifferenceAddWideningLower : Absolute difference and accumulate long (bottom) + + /// + /// svint16_t svabalb[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// SABALB Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; SABALB Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_FX_3A SABALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningLower(addend, left, right); + + /// + /// svint32_t svabalb[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// SABALB Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; SABALB Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_FX_3A SABALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningLower(addend, left, right); + + /// + /// svint64_t svabalb[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// SABALB Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; SABALB Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_FX_3A SABALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningLower(addend, left, right); + + /// + /// svuint16_t svabalb[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) + /// UABALB Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; UABALB Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_FX_3A UABALB ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uabalb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningLower(addend, left, right); + + /// + /// svuint32_t svabalb[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) + /// UABALB Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; UABALB Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_FX_3A UABALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uabalb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningLower(addend, left, right); + + /// + /// svuint64_t svabalb[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) + /// UABALB Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; UABALB Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_FX_3A UABALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uabalb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AbsoluteDifferenceAddWideningLower(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningLower(addend, left, right); + + + /// AbsoluteDifferenceAddWideningUpper : Absolute difference and accumulate long (top) + + /// + /// svint16_t svabalt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// SABALT Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; SABALT Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_FX_3A SABALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningUpper(addend, left, right); + + /// + /// svint32_t svabalt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// SABALT Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; SABALT Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_FX_3A SABALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningUpper(addend, left, right); + + /// + /// svint64_t svabalt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// SABALT Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; SABALT Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_FX_3A SABALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningUpper(addend, left, right); + + /// + /// svuint16_t svabalt[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) + /// UABALT Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; UABALT Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_FX_3A UABALT ., ., . 
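+ ///
+ /// Editor's note (illustrative sketch, not generator output): in the SVE2 naming,
+ /// "bottom" (B) forms read the even-numbered lanes and "top" (T) forms the odd ones.
+ /// A scalar model of SABALB/UABALB, with hypothetical array stand-ins:
+ ///
+ ///   static short[] AbaWideningLowerScalar(short[] addend, sbyte[] left, sbyte[] right)
+ ///   {
+ ///       var dst = new short[addend.Length];
+ ///       for (int i = 0; i < dst.Length; i++)
+ ///           dst[i] = (short)(addend[i] + Math.Abs(left[2 * i] - right[2 * i]));
+ ///       return dst;
+ ///   }
+ ///
+ ///   // The *WideningUpper (SABALT/UABALT) forms read left[2 * i + 1], right[2 * i + 1].
+ ///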
+ /// theEmitter->emitIns_R_R_R(INS_sve_uabalt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningUpper(addend, left, right); + + /// + /// svuint32_t svabalt[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) + /// UABALT Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; UABALT Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_FX_3A UABALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uabalt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningUpper(addend, left, right); + + /// + /// svuint64_t svabalt[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) + /// UABALT Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; UABALT Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_FX_3A UABALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uabalt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceAddWideningUpper(Vector addend, Vector left, Vector right) => AbsoluteDifferenceAddWideningUpper(addend, left, right); + + + /// AbsoluteDifferenceWideningLower : Absolute difference long (bottom) + + /// + /// svint16_t svabdlb[_s16](svint8_t op1, svint8_t op2) + /// SABDLB Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SABDLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabdlb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector right) => AbsoluteDifferenceWideningLower(left, right); + + /// + /// svint32_t svabdlb[_s32](svint16_t op1, svint16_t op2) + /// SABDLB Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SABDLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabdlb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector right) => AbsoluteDifferenceWideningLower(left, right); + + /// + /// svint64_t svabdlb[_s64](svint32_t op1, svint32_t op2) + /// SABDLB Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SABDLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabdlb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector right) => AbsoluteDifferenceWideningLower(left, right); + + /// + /// svuint16_t svabdlb[_u16](svuint8_t op1, svuint8_t op2) + /// UABDLB Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FL_3A UABDLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uabdlb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector right) => AbsoluteDifferenceWideningLower(left, right); + + /// + /// svuint32_t svabdlb[_u32](svuint16_t op1, svuint16_t op2) + /// UABDLB Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FL_3A UABDLB ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uabdlb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector right) => AbsoluteDifferenceWideningLower(left, right); + + /// + /// svuint64_t svabdlb[_u64](svuint32_t op1, svuint32_t op2) + /// UABDLB Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FL_3A UABDLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uabdlb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AbsoluteDifferenceWideningLower(Vector left, Vector right) => AbsoluteDifferenceWideningLower(left, right); + + + /// AbsoluteDifferenceWideningUpper : Absolute difference long (top) + + /// + /// svint16_t svabdlt[_s16](svint8_t op1, svint8_t op2) + /// SABDLT Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SABDLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabdlt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, Vector right) => AbsoluteDifferenceWideningUpper(left, right); + + /// + /// svint32_t svabdlt[_s32](svint16_t op1, svint16_t op2) + /// SABDLT Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SABDLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabdlt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, Vector right) => AbsoluteDifferenceWideningUpper(left, right); + + /// + /// svint64_t svabdlt[_s64](svint32_t op1, svint32_t op2) + /// SABDLT Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SABDLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sabdlt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, Vector right) => AbsoluteDifferenceWideningUpper(left, right); + + /// + /// svuint16_t svabdlt[_u16](svuint8_t op1, svuint8_t op2) + /// UABDLT Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FL_3A UABDLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uabdlt, EA_SCALABLE, REG_V21, REG_V22, REG_V24, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, Vector right) => AbsoluteDifferenceWideningUpper(left, right); + + /// + /// svuint32_t svabdlt[_u32](svuint16_t op1, svuint16_t op2) + /// UABDLT Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FL_3A UABDLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uabdlt, EA_SCALABLE, REG_V21, REG_V22, REG_V24, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, Vector right) => AbsoluteDifferenceWideningUpper(left, right); + + /// + /// svuint64_t svabdlt[_u64](svuint32_t op1, svuint32_t op2) + /// UABDLT Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FL_3A UABDLT ., ., . 
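+ ///
+ /// Editor's note (illustrative, not generator output): SABDLB/UABDLB and their top
+ /// forms are the accumulate-free counterparts of the sketch above; per widened lane,
+ ///
+ ///   // lower (bottom): dst[i] = Math.Abs(left[2 * i] - right[2 * i]);
+ ///   // upper (top):    dst[i] = Math.Abs(left[2 * i + 1] - right[2 * i + 1]);
+ ///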
+ /// theEmitter->emitIns_R_R_R(INS_sve_uabdlt, EA_SCALABLE, REG_V21, REG_V22, REG_V24, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector AbsoluteDifferenceWideningUpper(Vector left, Vector right) => AbsoluteDifferenceWideningUpper(left, right); + + + /// AddCarryWideningLower : Add with carry long (bottom) + + /// + /// svuint32_t svadclb[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// ADCLB Ztied1.S, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; ADCLB Zresult.S, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_FY_3A ADCLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_adclb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_adclb, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AddCarryWideningLower(Vector op1, Vector op2, Vector op3) => AddCarryWideningLower(op1, op2, op3); + + /// + /// svuint64_t svadclb[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// ADCLB Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; ADCLB Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_FY_3A ADCLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_adclb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_adclb, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AddCarryWideningLower(Vector op1, Vector op2, Vector op3) => AddCarryWideningLower(op1, op2, op3); + + + /// AddCarryWideningUpper : Add with carry long (top) + + /// + /// svuint32_t svadclt[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// ADCLT Ztied1.S, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; ADCLT Zresult.S, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_FY_3A ADCLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_adclt, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_adclt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AddCarryWideningUpper(Vector op1, Vector op2, Vector op3) => AddCarryWideningUpper(op1, op2, op3); + + /// + /// svuint64_t svadclt[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// ADCLT Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; ADCLT Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_FY_3A ADCLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_adclt, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_adclt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AddCarryWideningUpper(Vector op1, Vector op2, Vector op3) => AddCarryWideningUpper(op1, op2, op3); + + + /// AddHighNarrowingLower : Add narrow high part (bottom) + + /// + /// svint8_t svaddhnb[_s16](svint16_t op1, svint16_t op2) + /// ADDHNB Zresult.B, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GC_3A ADDHNB ., ., . 
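+ ///
+ /// Editor's note (illustrative sketch, not generator output): ADDHNB keeps the most
+ /// significant half of each wide sum, writing the even lanes of the narrow result and
+ /// zeroing the odd lanes. Scalar model with hypothetical array stand-ins:
+ ///
+ ///   static byte[] AddHighNarrowingLowerScalar(ushort[] left, ushort[] right)
+ ///   {
+ ///       var dst = new byte[left.Length * 2];                  // odd lanes stay zero
+ ///       for (int i = 0; i < left.Length; i++)
+ ///           dst[2 * i] = (byte)((left[i] + right[i]) >> 8);   // high half of 16-bit sum
+ ///       return dst;
+ ///   }
+ ///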
+ /// theEmitter->emitIns_R_R_R(INS_sve_addhnb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector AddHighNarrowingLower(Vector left, Vector right) => AddHighNarrowingLower(left, right); + + /// + /// svint16_t svaddhnb[_s32](svint32_t op1, svint32_t op2) + /// ADDHNB Zresult.H, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GC_3A ADDHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addhnb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector AddHighNarrowingLower(Vector left, Vector right) => AddHighNarrowingLower(left, right); + + /// + /// svint32_t svaddhnb[_s64](svint64_t op1, svint64_t op2) + /// ADDHNB Zresult.S, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GC_3A ADDHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addhnb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector AddHighNarrowingLower(Vector left, Vector right) => AddHighNarrowingLower(left, right); + + /// + /// svuint8_t svaddhnb[_u16](svuint16_t op1, svuint16_t op2) + /// ADDHNB Zresult.B, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GC_3A ADDHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addhnb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector AddHighNarrowingLower(Vector left, Vector right) => AddHighNarrowingLower(left, right); + + /// + /// svuint16_t svaddhnb[_u32](svuint32_t op1, svuint32_t op2) + /// ADDHNB Zresult.H, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GC_3A ADDHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addhnb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector AddHighNarrowingLower(Vector left, Vector right) => AddHighNarrowingLower(left, right); + + /// + /// svuint32_t svaddhnb[_u64](svuint64_t op1, svuint64_t op2) + /// ADDHNB Zresult.S, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GC_3A ADDHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addhnb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector AddHighNarrowingLower(Vector left, Vector right) => AddHighNarrowingLower(left, right); + + + /// AddHighNarrowingUpper : Add narrow high part (top) + + /// + /// svint8_t svaddhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2) + /// ADDHNT Ztied.B, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GC_3A ADDHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addhnt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, Vector right) => AddHighNarrowingUpper(even, left, right); + + /// + /// svint16_t svaddhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2) + /// ADDHNT Ztied.H, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GC_3A ADDHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addhnt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, Vector right) => AddHighNarrowingUpper(even, left, right); + + /// + /// svint32_t svaddhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2) + /// ADDHNT Ztied.S, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GC_3A ADDHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addhnt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, Vector right) => AddHighNarrowingUpper(even, left, right); + + /// + /// svuint8_t svaddhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2) + /// ADDHNT Ztied.B, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GC_3A ADDHNT ., ., . 
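+ ///
+ /// Editor's note (illustrative sketch, not generator output): the top form fills the
+ /// odd lanes and passes the even lanes through from the "even" operand:
+ ///
+ ///   static byte[] AddHighNarrowingUpperScalar(byte[] even, ushort[] left, ushort[] right)
+ ///   {
+ ///       var dst = (byte[])even.Clone();                        // even lanes preserved
+ ///       for (int i = 0; i < left.Length; i++)
+ ///           dst[2 * i + 1] = (byte)((left[i] + right[i]) >> 8);
+ ///       return dst;
+ ///   }
+ ///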
+ /// theEmitter->emitIns_R_R_R(INS_sve_addhnt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, Vector right) => AddHighNarrowingUpper(even, left, right); + + /// + /// svuint16_t svaddhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2) + /// ADDHNT Ztied.H, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GC_3A ADDHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addhnt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, Vector right) => AddHighNarrowingUpper(even, left, right); + + /// + /// svuint32_t svaddhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2) + /// ADDHNT Ztied.S, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GC_3A ADDHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addhnt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddHighNarrowingUpper(Vector even, Vector left, Vector right) => AddHighNarrowingUpper(even, left, right); + + + /// AddPairwise : Add pairwise + + /// + /// svint8_t svaddp[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// ADDP Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; ADDP Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svaddp[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// ADDP Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; ADDP Zresult.B, Pg/M, Zresult.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_ER_3A ADDP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addp, EA_SCALABLE, REG_V23, REG_P6, REG_V18, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svint16_t svaddp[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// ADDP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; ADDP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svaddp[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// ADDP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; ADDP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_ER_3A ADDP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addp, EA_SCALABLE, REG_V23, REG_P6, REG_V18, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svint32_t svaddp[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// ADDP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; ADDP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svaddp[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// ADDP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; ADDP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_ER_3A ADDP ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_addp, EA_SCALABLE, REG_V23, REG_P6, REG_V18, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svint64_t svaddp[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// ADDP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; ADDP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svaddp[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// ADDP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; ADDP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_ER_3A ADDP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addp, EA_SCALABLE, REG_V23, REG_P6, REG_V18, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svuint8_t svaddp[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// ADDP Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; ADDP Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svaddp[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// ADDP Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; ADDP Zresult.B, Pg/M, Zresult.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_ER_3A ADDP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addp, EA_SCALABLE, REG_V23, REG_P6, REG_V18, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svuint16_t svaddp[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// ADDP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; ADDP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svaddp[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// ADDP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; ADDP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_ER_3A ADDP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addp, EA_SCALABLE, REG_V23, REG_P6, REG_V18, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svuint32_t svaddp[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// ADDP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; ADDP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svaddp[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// ADDP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; ADDP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_ER_3A ADDP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_addp, EA_SCALABLE, REG_V23, REG_P6, REG_V18, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svuint64_t svaddp[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// ADDP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; ADDP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svaddp[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// ADDP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; ADDP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_ER_3A ADDP ., /M, ., . 
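+ ///
+ /// Editor's note (illustrative sketch, not generator output): as I read the SVE2
+ /// pairwise layout, adjacent pairs from op1 land in the even result lanes and pairs
+ /// from op2 in the odd lanes. Scalar model with hypothetical array stand-ins:
+ ///
+ ///   static int[] AddPairwiseScalar(int[] left, int[] right)
+ ///   {
+ ///       var dst = new int[left.Length];
+ ///       for (int i = 0; i < left.Length / 2; i++)
+ ///       {
+ ///           dst[2 * i]     = left[2 * i]  + left[2 * i + 1];
+ ///           dst[2 * i + 1] = right[2 * i] + right[2 * i + 1];
+ ///       }
+ ///       return dst;
+ ///   }
+ ///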
+ /// theEmitter->emitIns_R_R_R(INS_sve_addp, EA_SCALABLE, REG_V23, REG_P6, REG_V18, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svfloat32_t svaddp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FADDP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FADDP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svaddp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FADDP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FADDP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GR_3A FADDP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_faddp, EA_SCALABLE, REG_V16, REG_P3, REG_V19, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + /// + /// svfloat64_t svaddp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FADDP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FADDP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svaddp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FADDP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FADDP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GR_3A FADDP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_faddp, EA_SCALABLE, REG_V16, REG_P3, REG_V19, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + + /// AddPairwiseWidening : Add and accumulate long pairwise + + /// + /// svint16_t svadalp[_s16]_m(svbool_t pg, svint16_t op1, svint8_t op2) + /// SADALP Ztied1.H, Pg/M, Zop2.B + /// MOVPRFX Zresult, Zop1; SADALP Zresult.H, Pg/M, Zop2.B + /// svint16_t svadalp[_s16]_x(svbool_t pg, svint16_t op1, svint8_t op2) + /// SADALP Ztied1.H, Pg/M, Zop2.B + /// MOVPRFX Zresult, Zop1; SADALP Zresult.H, Pg/M, Zop2.B + /// svint16_t svadalp[_s16]_z(svbool_t pg, svint16_t op1, svint8_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; SADALP Zresult.H, Pg/M, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_EQ_3A SADALP ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_sadalp, EA_SCALABLE, REG_V26, REG_P3, REG_V8, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right) => AddPairwiseWidening(left, right); + + /// + /// svint32_t svadalp[_s32]_m(svbool_t pg, svint32_t op1, svint16_t op2) + /// SADALP Ztied1.S, Pg/M, Zop2.H + /// MOVPRFX Zresult, Zop1; SADALP Zresult.S, Pg/M, Zop2.H + /// svint32_t svadalp[_s32]_x(svbool_t pg, svint32_t op1, svint16_t op2) + /// SADALP Ztied1.S, Pg/M, Zop2.H + /// MOVPRFX Zresult, Zop1; SADALP Zresult.S, Pg/M, Zop2.H + /// svint32_t svadalp[_s32]_z(svbool_t pg, svint32_t op1, svint16_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; SADALP Zresult.S, Pg/M, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_EQ_3A SADALP ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sadalp, EA_SCALABLE, REG_V26, REG_P3, REG_V8, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right) => AddPairwiseWidening(left, right); + + /// + /// svint64_t svadalp[_s64]_m(svbool_t pg, svint64_t op1, svint32_t op2) + /// SADALP Ztied1.D, Pg/M, Zop2.S + /// MOVPRFX Zresult, Zop1; SADALP Zresult.D, Pg/M, Zop2.S + /// svint64_t svadalp[_s64]_x(svbool_t pg, svint64_t op1, svint32_t op2) + /// SADALP Ztied1.D, Pg/M, Zop2.S + /// MOVPRFX Zresult, Zop1; SADALP Zresult.D, Pg/M, Zop2.S + /// svint64_t svadalp[_s64]_z(svbool_t pg, svint64_t op1, svint32_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SADALP Zresult.D, Pg/M, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_EQ_3A SADALP ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_sadalp, EA_SCALABLE, REG_V26, REG_P3, REG_V8, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right) => AddPairwiseWidening(left, right); + + /// + /// svuint16_t svadalp[_u16]_m(svbool_t pg, svuint16_t op1, svuint8_t op2) + /// UADALP Ztied1.H, Pg/M, Zop2.B + /// MOVPRFX Zresult, Zop1; UADALP Zresult.H, Pg/M, Zop2.B + /// svuint16_t svadalp[_u16]_x(svbool_t pg, svuint16_t op1, svuint8_t op2) + /// UADALP Ztied1.H, Pg/M, Zop2.B + /// MOVPRFX Zresult, Zop1; UADALP Zresult.H, Pg/M, Zop2.B + /// svuint16_t svadalp[_u16]_z(svbool_t pg, svuint16_t op1, svuint8_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; UADALP Zresult.H, Pg/M, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_EQ_3A UADALP ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_uadalp, EA_SCALABLE, REG_V27, REG_P2, REG_V9, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_uadalp, EA_SCALABLE, REG_V28, REG_P0, REG_V31, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right) => AddPairwiseWidening(left, right); + + /// + /// svuint32_t svadalp[_u32]_m(svbool_t pg, svuint32_t op1, svuint16_t op2) + /// UADALP Ztied1.S, Pg/M, Zop2.H + /// MOVPRFX Zresult, Zop1; UADALP Zresult.S, Pg/M, Zop2.H + /// svuint32_t svadalp[_u32]_x(svbool_t pg, svuint32_t op1, svuint16_t op2) + /// UADALP Ztied1.S, Pg/M, Zop2.H + /// MOVPRFX Zresult, Zop1; UADALP Zresult.S, Pg/M, Zop2.H + /// svuint32_t svadalp[_u32]_z(svbool_t pg, svuint32_t op1, svuint16_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; UADALP Zresult.S, Pg/M, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_EQ_3A UADALP ., /M, . 
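+ ///
+ /// Editor's note (illustrative sketch, not generator output): each wide accumulator
+ /// lane absorbs the sum of the two narrow lanes beneath it. Scalar model of
+ /// SADALP/UADALP with hypothetical array stand-ins:
+ ///
+ ///   static short[] AddPairwiseWideningScalar(short[] addend, sbyte[] narrow)
+ ///   {
+ ///       var dst = new short[addend.Length];
+ ///       for (int i = 0; i < dst.Length; i++)
+ ///           dst[i] = (short)(addend[i] + narrow[2 * i] + narrow[2 * i + 1]);
+ ///       return dst;
+ ///   }
+ ///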
+ /// theEmitter->emitIns_R_R_R(INS_sve_uadalp, EA_SCALABLE, REG_V27, REG_P2, REG_V9, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_uadalp, EA_SCALABLE, REG_V28, REG_P0, REG_V31, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right) => AddPairwiseWidening(left, right); + + /// + /// svuint64_t svadalp[_u64]_m(svbool_t pg, svuint64_t op1, svuint32_t op2) + /// UADALP Ztied1.D, Pg/M, Zop2.S + /// MOVPRFX Zresult, Zop1; UADALP Zresult.D, Pg/M, Zop2.S + /// svuint64_t svadalp[_u64]_x(svbool_t pg, svuint64_t op1, svuint32_t op2) + /// UADALP Ztied1.D, Pg/M, Zop2.S + /// MOVPRFX Zresult, Zop1; UADALP Zresult.D, Pg/M, Zop2.S + /// svuint64_t svadalp[_u64]_z(svbool_t pg, svuint64_t op1, svuint32_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; UADALP Zresult.D, Pg/M, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_EQ_3A UADALP ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_uadalp, EA_SCALABLE, REG_V27, REG_P2, REG_V9, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_uadalp, EA_SCALABLE, REG_V28, REG_P0, REG_V31, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwiseWidening(Vector left, Vector right) => AddPairwiseWidening(left, right); + + + /// AddRotateComplex : Complex add with rotate + + /// + /// svint8_t svcadd[_s8](svint8_t op1, svint8_t op2, uint64_t imm_rotation) + /// CADD Ztied1.B, Ztied1.B, Zop2.B, #imm_rotation + /// MOVPRFX Zresult, Zop1; CADD Zresult.B, Zresult.B, Zop2.B, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_FV_2A CADD ., ., ., + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V0, REG_V1, 90, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V2, REG_V3, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V4, REG_V5, 270, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V6, REG_V7, 270, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + /// + /// svint16_t svcadd[_s16](svint16_t op1, svint16_t op2, uint64_t imm_rotation) + /// CADD Ztied1.H, Ztied1.H, Zop2.H, #imm_rotation + /// MOVPRFX Zresult, Zop1; CADD Zresult.H, Zresult.H, Zop2.H, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_FV_2A CADD ., ., ., + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V0, REG_V1, 90, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V2, REG_V3, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V4, REG_V5, 270, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V6, REG_V7, 270, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + /// + /// svint32_t svcadd[_s32](svint32_t op1, svint32_t op2, uint64_t imm_rotation) + /// CADD Ztied1.S, Ztied1.S, Zop2.S, #imm_rotation + /// MOVPRFX Zresult, Zop1; CADD Zresult.S, Zresult.S, Zop2.S, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_FV_2A CADD ., ., ., + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V0, REG_V1, 90, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V2, REG_V3, 90, 
INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V4, REG_V5, 270, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V6, REG_V7, 270, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + /// + /// svint64_t svcadd[_s64](svint64_t op1, svint64_t op2, uint64_t imm_rotation) + /// CADD Ztied1.D, Ztied1.D, Zop2.D, #imm_rotation + /// MOVPRFX Zresult, Zop1; CADD Zresult.D, Zresult.D, Zop2.D, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_FV_2A CADD ., ., ., + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V0, REG_V1, 90, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V2, REG_V3, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V4, REG_V5, 270, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V6, REG_V7, 270, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + /// + /// svuint8_t svcadd[_u8](svuint8_t op1, svuint8_t op2, uint64_t imm_rotation) + /// CADD Ztied1.B, Ztied1.B, Zop2.B, #imm_rotation + /// MOVPRFX Zresult, Zop1; CADD Zresult.B, Zresult.B, Zop2.B, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_FV_2A CADD ., ., ., + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V0, REG_V1, 90, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V2, REG_V3, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V4, REG_V5, 270, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V6, REG_V7, 270, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + /// + /// svuint16_t svcadd[_u16](svuint16_t op1, svuint16_t op2, uint64_t imm_rotation) + /// CADD Ztied1.H, Ztied1.H, Zop2.H, #imm_rotation + /// MOVPRFX Zresult, Zop1; CADD Zresult.H, Zresult.H, Zop2.H, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_FV_2A CADD ., ., ., + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V0, REG_V1, 90, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V2, REG_V3, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V4, REG_V5, 270, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V6, REG_V7, 270, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + /// + /// svuint32_t svcadd[_u32](svuint32_t op1, svuint32_t op2, uint64_t imm_rotation) + /// CADD Ztied1.S, Ztied1.S, Zop2.S, #imm_rotation + /// MOVPRFX Zresult, Zop1; CADD Zresult.S, Zresult.S, Zop2.S, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_FV_2A CADD ., ., ., + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V0, REG_V1, 90, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V2, REG_V3, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V4, REG_V5, 270, INS_OPTS_SCALABLE_S); + /// 
theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V6, REG_V7, 270, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<uint> AddRotateComplex(Vector<uint> left, Vector<uint> right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation);
+
+    ///
+    /// svuint64_t svcadd[_u64](svuint64_t op1, svuint64_t op2, uint64_t imm_rotation)
+    ///   CADD Ztied1.D, Ztied1.D, Zop2.D, #imm_rotation
+    ///   MOVPRFX Zresult, Zop1; CADD Zresult.D, Zresult.D, Zop2.D, #imm_rotation
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FV_2A   CADD <Zdn>.<T>, <Zdn>.<T>, <Zm>.<T>, <const>
+    ///        theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V0, REG_V1, 90, INS_OPTS_SCALABLE_B);
+    ///        theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V2, REG_V3, 90, INS_OPTS_SCALABLE_H);
+    ///        theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V4, REG_V5, 270, INS_OPTS_SCALABLE_S);
+    ///        theEmitter->emitIns_R_R_I(INS_sve_cadd, EA_SCALABLE, REG_V6, REG_V7, 270, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<ulong> AddRotateComplex(Vector<ulong> left, Vector<ulong> right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation);
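+    // Illustrative usage sketch, not part of the generated surface: assuming these stubs
+    // ship as Vector<T> overloads on an SVE2 intrinsics class (called Sve2 below purely
+    // for the example), complex add treats even/odd element pairs as (real, imaginary)
+    // and rotates the second operand before adding. 'a' and 'b' are hypothetical
+    // operands; the rotation constant must be 90 or 270, matching CADD's #imm_rotation.
+    //
+    //     Vector<int> sum = Sve2.AddRotateComplex(a, b, 90);   // a + (b rotated by 90 degrees)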
+
+
+    /// AddSaturate : Saturating add
+
+    ///
+    /// svint8_t svqadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+    ///   SQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+    ///   MOVPRFX Zresult, Zop1; SQADD Zresult.B, Pg/M, Zresult.B, Zop2.B
+    /// svint8_t svqadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+    ///   SQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+    ///   SQADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+    ///   SQADD Zresult.B, Zop1.B, Zop2.B
+    /// svint8_t svqadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+    ///   MOVPRFX Zresult.B, Pg/Z, Zop1.B; SQADD Zresult.B, Pg/M, Zresult.B, Zop2.B
+    ///   MOVPRFX Zresult.B, Pg/Z, Zop2.B; SQADD Zresult.B, Pg/M, Zresult.B, Zop1.B
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_ET_3A   SQADD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V28, REG_P1, REG_V23, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_AT_3A   SQADD <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V3, REG_V31, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+    ///    IF_SVE_EC_1A   SQADD <Zdn>.<T>, <Zdn>.<T>, #<imm>{, <shift>}
+    ///        theEmitter->emitIns_R_I(INS_sve_sqadd, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT);
+    ///
+    /// Embedded arg1 mask predicate
+    ///
+    public static unsafe Vector<sbyte> AddSaturate(Vector<sbyte> left, Vector<sbyte> right) => AddSaturate(left, right);
+
+    ///
+    /// svint16_t svqadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+    ///   SQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+    ///   MOVPRFX Zresult, Zop1; SQADD Zresult.H, Pg/M, Zresult.H, Zop2.H
+    /// svint16_t svqadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+    ///   SQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+    ///   SQADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+    ///   SQADD Zresult.H, Zop1.H, Zop2.H
+    /// svint16_t svqadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+    ///   MOVPRFX Zresult.H, Pg/Z, Zop1.H; SQADD Zresult.H, Pg/M, Zresult.H, Zop2.H
+    ///   MOVPRFX Zresult.H, Pg/Z, Zop2.H; SQADD Zresult.H, Pg/M, Zresult.H, Zop1.H
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_ET_3A   SQADD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V28, REG_P1, REG_V23, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_AT_3A   SQADD <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V3, REG_V31, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+    ///    IF_SVE_EC_1A   SQADD <Zdn>.<T>, <Zdn>.<T>, #<imm>{, <shift>}
+    ///        theEmitter->emitIns_R_I(INS_sve_sqadd, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT);
+    ///
+    /// Embedded arg1 mask predicate
+    ///
+    public static unsafe Vector<short> AddSaturate(Vector<short> left, Vector<short> right) => AddSaturate(left, right);
+
+    ///
+    /// svint32_t svqadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+    ///   SQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+    ///   MOVPRFX Zresult, Zop1; SQADD Zresult.S, Pg/M, Zresult.S, Zop2.S
+    /// svint32_t svqadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+    ///   SQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+    ///   SQADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+    ///   SQADD Zresult.S, Zop1.S, Zop2.S
+    /// svint32_t svqadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+    ///   MOVPRFX Zresult.S, Pg/Z, Zop1.S; SQADD Zresult.S, Pg/M, Zresult.S, Zop2.S
+    ///   MOVPRFX Zresult.S, Pg/Z, Zop2.S; SQADD Zresult.S, Pg/M, Zresult.S, Zop1.S
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_ET_3A   SQADD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V28, REG_P1, REG_V23, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_AT_3A   SQADD <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V3, REG_V31, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+    ///    IF_SVE_EC_1A   SQADD <Zdn>.<T>, <Zdn>.<T>, #<imm>{, <shift>}
+    ///        theEmitter->emitIns_R_I(INS_sve_sqadd, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT);
+    ///
+    /// Embedded arg1 mask predicate
+    ///
+    public static unsafe Vector<int> AddSaturate(Vector<int> left, Vector<int> right) => AddSaturate(left, right);
+
+    ///
+    /// svint64_t svqadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+    ///   SQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+    ///   MOVPRFX Zresult, Zop1; SQADD Zresult.D, Pg/M, Zresult.D, Zop2.D
+    /// svint64_t svqadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+    ///   SQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+    ///   SQADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+    ///   SQADD Zresult.D, Zop1.D, Zop2.D
+    /// svint64_t svqadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+    ///   MOVPRFX Zresult.D, Pg/Z, Zop1.D; SQADD Zresult.D, Pg/M, Zresult.D, Zop2.D
+    ///   MOVPRFX Zresult.D, Pg/Z, Zop2.D; SQADD Zresult.D, Pg/M, Zresult.D, Zop1.D
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_ET_3A   SQADD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V28, REG_P1, REG_V23, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_AT_3A   SQADD <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqadd, EA_SCALABLE, REG_V3, REG_V31, REG_V12, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SQADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sqadd, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svuint8_t svqadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; UQADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svqadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// UQADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// UQADD Zresult.B, Zop1.B, Zop2.B + /// svuint8_t svqadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; UQADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; UQADD Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V0, REG_P3, REG_V27, INS_OPTS_SCALABLE_S); + /// IF_SVE_AT_3A UQADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V23, REG_V28, REG_V29, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A UQADD ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_uqadd, EA_SCALABLE, REG_V5, 5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddSaturate(Vector left, Vector right) => AddSaturate(left, right); + + /// + /// svuint16_t svqadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; UQADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svqadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// UQADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// UQADD Zresult.H, Zop1.H, Zop2.H + /// svuint16_t svqadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; UQADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; UQADD Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V0, REG_P3, REG_V27, INS_OPTS_SCALABLE_S); + /// IF_SVE_AT_3A UQADD ., ., . 
+    ///        theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V23, REG_V28, REG_V29, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+    ///    IF_SVE_EC_1A   UQADD <Zdn>.<T>, <Zdn>.<T>, #<imm>{, <shift>}
+    ///        theEmitter->emitIns_R_I(INS_sve_uqadd, EA_SCALABLE, REG_V5, 5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT);
+    ///
+    /// Embedded arg1 mask predicate
+    ///
+    public static unsafe Vector<ushort> AddSaturate(Vector<ushort> left, Vector<ushort> right) => AddSaturate(left, right);
+
+    ///
+    /// svuint32_t svqadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    ///   UQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+    ///   MOVPRFX Zresult, Zop1; UQADD Zresult.S, Pg/M, Zresult.S, Zop2.S
+    /// svuint32_t svqadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    ///   UQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+    ///   UQADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+    ///   UQADD Zresult.S, Zop1.S, Zop2.S
+    /// svuint32_t svqadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    ///   MOVPRFX Zresult.S, Pg/Z, Zop1.S; UQADD Zresult.S, Pg/M, Zresult.S, Zop2.S
+    ///   MOVPRFX Zresult.S, Pg/Z, Zop2.S; UQADD Zresult.S, Pg/M, Zresult.S, Zop1.S
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_ET_3A   UQADD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V0, REG_P3, REG_V27, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_AT_3A   UQADD <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V23, REG_V28, REG_V29, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+    ///    IF_SVE_EC_1A   UQADD <Zdn>.<T>, <Zdn>.<T>, #<imm>{, <shift>}
+    ///        theEmitter->emitIns_R_I(INS_sve_uqadd, EA_SCALABLE, REG_V5, 5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT);
+    ///
+    /// Embedded arg1 mask predicate
+    ///
+    public static unsafe Vector<uint> AddSaturate(Vector<uint> left, Vector<uint> right) => AddSaturate(left, right);
+
+    ///
+    /// svuint64_t svqadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    ///   UQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+    ///   MOVPRFX Zresult, Zop1; UQADD Zresult.D, Pg/M, Zresult.D, Zop2.D
+    /// svuint64_t svqadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    ///   UQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+    ///   UQADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+    ///   UQADD Zresult.D, Zop1.D, Zop2.D
+    /// svuint64_t svqadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    ///   MOVPRFX Zresult.D, Pg/Z, Zop1.D; UQADD Zresult.D, Pg/M, Zresult.D, Zop2.D
+    ///   MOVPRFX Zresult.D, Pg/Z, Zop2.D; UQADD Zresult.D, Pg/M, Zresult.D, Zop1.D
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_ET_3A   UQADD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V0, REG_P3, REG_V27, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_AT_3A   UQADD <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_uqadd, EA_SCALABLE, REG_V23, REG_V28, REG_V29, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+    ///    IF_SVE_EC_1A   UQADD <Zdn>.<T>, <Zdn>.<T>, #<imm>{, <shift>}
+    ///        theEmitter->emitIns_R_I(INS_sve_uqadd, EA_SCALABLE, REG_V5, 5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT);
+    ///
+    /// Embedded arg1 mask predicate
+    ///
+    public static unsafe Vector<ulong> AddSaturate(Vector<ulong> left, Vector<ulong> right) => AddSaturate(left, right);
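+    // Illustrative usage sketch, not part of the generated surface: assuming these stubs
+    // ship as Vector<T> overloads on an SVE2 intrinsics class (named Sve2 below purely for
+    // the example), a saturating add clamps at the numeric limits instead of wrapping.
+    // 'a' and 'b' are hypothetical operands:
+    //
+    //     Vector<byte> clamped = Sve2.AddSaturate(a, b);   // e.g. 250 + 10 saturates to 255, not 4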
+
+
+    /// AddSaturateWithSignedAddend : Saturating add with signed addend
+
+    ///
+    /// svuint8_t svsqadd[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2)
+    ///   USQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+    ///   MOVPRFX Zresult, Zop1; USQADD Zresult.B, Pg/M, Zresult.B, Zop2.B
+    /// svuint8_t svsqadd[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2)
+    ///   USQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+    ///   MOVPRFX Zresult, Zop1; USQADD Zresult.B, Pg/M, Zresult.B, Zop2.B
+    /// svuint8_t svsqadd[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2)
+    ///   MOVPRFX Zresult.B, Pg/Z, Zop1.B; USQADD Zresult.B, Pg/M, Zresult.B, Zop2.B
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_ET_3A   USQADD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_usqadd, EA_SCALABLE, REG_V3, REG_P6, REG_V30, INS_OPTS_SCALABLE_B);
+    ///
+    /// Embedded arg1 mask predicate
+    ///
+    public static unsafe Vector<byte> AddSaturateWithSignedAddend(Vector<byte> left, Vector<sbyte> right) => AddSaturateWithSignedAddend(left, right);
+
+    ///
+    /// svuint16_t svsqadd[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2)
+    ///   USQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+    ///   MOVPRFX Zresult, Zop1; USQADD Zresult.H, Pg/M, Zresult.H, Zop2.H
+    /// svuint16_t svsqadd[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2)
+    ///   USQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+    ///   MOVPRFX Zresult, Zop1; USQADD Zresult.H, Pg/M, Zresult.H, Zop2.H
+    /// svuint16_t svsqadd[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2)
+    ///   MOVPRFX Zresult.H, Pg/Z, Zop1.H; USQADD Zresult.H, Pg/M, Zresult.H, Zop2.H
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_ET_3A   USQADD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_usqadd, EA_SCALABLE, REG_V3, REG_P6, REG_V30, INS_OPTS_SCALABLE_B);
+    ///
+    /// Embedded arg1 mask predicate
+    ///
+    public static unsafe Vector<ushort> AddSaturateWithSignedAddend(Vector<ushort> left, Vector<short> right) => AddSaturateWithSignedAddend(left, right);
+
+    ///
+    /// svuint32_t svsqadd[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2)
+    ///   USQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+    ///   MOVPRFX Zresult, Zop1; USQADD Zresult.S, Pg/M, Zresult.S, Zop2.S
+    /// svuint32_t svsqadd[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2)
+    ///   USQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+    ///   MOVPRFX Zresult, Zop1; USQADD Zresult.S, Pg/M, Zresult.S, Zop2.S
+    /// svuint32_t svsqadd[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2)
+    ///   MOVPRFX Zresult.S, Pg/Z, Zop1.S; USQADD Zresult.S, Pg/M, Zresult.S, Zop2.S
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_ET_3A   USQADD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_usqadd, EA_SCALABLE, REG_V3, REG_P6, REG_V30, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddSaturateWithSignedAddend(Vector left, Vector right) => AddSaturateWithSignedAddend(left, right); + + /// + /// svuint64_t svsqadd[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) + /// USQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; USQADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svsqadd[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) + /// USQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; USQADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svsqadd[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; USQADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_ET_3A USQADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_usqadd, EA_SCALABLE, REG_V3, REG_P6, REG_V30, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddSaturateWithSignedAddend(Vector left, Vector right) => AddSaturateWithSignedAddend(left, right); + + + /// AddSaturateWithUnsignedAddend : Saturating add with unsigned addend + + /// + /// svint8_t svuqadd[_s8]_m(svbool_t pg, svint8_t op1, svuint8_t op2) + /// SUQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; SUQADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svuqadd[_s8]_x(svbool_t pg, svint8_t op1, svuint8_t op2) + /// SUQADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; SUQADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svuqadd[_s8]_z(svbool_t pg, svint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; SUQADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_ET_3A SUQADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_suqadd, EA_SCALABLE, REG_V31, REG_P2, REG_V26, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, Vector right) => AddSaturateWithUnsignedAddend(left, right); + + /// + /// svint16_t svuqadd[_s16]_m(svbool_t pg, svint16_t op1, svuint16_t op2) + /// SUQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SUQADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svuqadd[_s16]_x(svbool_t pg, svint16_t op1, svuint16_t op2) + /// SUQADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SUQADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svuqadd[_s16]_z(svbool_t pg, svint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; SUQADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_ET_3A SUQADD ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_suqadd, EA_SCALABLE, REG_V31, REG_P2, REG_V26, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, Vector right) => AddSaturateWithUnsignedAddend(left, right); + + /// + /// svint32_t svuqadd[_s32]_m(svbool_t pg, svint32_t op1, svuint32_t op2) + /// SUQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SUQADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svuqadd[_s32]_x(svbool_t pg, svint32_t op1, svuint32_t op2) + /// SUQADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SUQADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svuqadd[_s32]_z(svbool_t pg, svint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; SUQADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_ET_3A SUQADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_suqadd, EA_SCALABLE, REG_V31, REG_P2, REG_V26, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, Vector right) => AddSaturateWithUnsignedAddend(left, right); + + /// + /// svint64_t svuqadd[_s64]_m(svbool_t pg, svint64_t op1, svuint64_t op2) + /// SUQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SUQADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svuqadd[_s64]_x(svbool_t pg, svint64_t op1, svuint64_t op2) + /// SUQADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SUQADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svuqadd[_s64]_z(svbool_t pg, svint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SUQADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_ET_3A SUQADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_suqadd, EA_SCALABLE, REG_V31, REG_P2, REG_V26, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddSaturateWithUnsignedAddend(Vector left, Vector right) => AddSaturateWithUnsignedAddend(left, right); + + + /// AddWideLower : Add wide (bottom) + + /// + /// svint16_t svaddwb[_s16](svint16_t op1, svint8_t op2) + /// SADDWB Zresult.H, Zop1.H, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FM_3A SADDWB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_saddwb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddWideLower(Vector left, Vector right) => AddWideLower(left, right); + + /// + /// svint32_t svaddwb[_s32](svint32_t op1, svint16_t op2) + /// SADDWB Zresult.S, Zop1.S, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FM_3A SADDWB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_saddwb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddWideLower(Vector left, Vector right) => AddWideLower(left, right); + + /// + /// svint64_t svaddwb[_s64](svint64_t op1, svint32_t op2) + /// SADDWB Zresult.D, Zop1.D, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FM_3A SADDWB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_saddwb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddWideLower(Vector left, Vector right) => AddWideLower(left, right); + + /// + /// svuint16_t svaddwb[_u16](svuint16_t op1, svuint8_t op2) + /// UADDWB Zresult.H, Zop1.H, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FM_3A UADDWB ., ., . 
+    ///        theEmitter->emitIns_R_R_R(INS_sve_uaddwb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S);
+    ///
+    public static unsafe Vector<ushort> AddWideLower(Vector<ushort> left, Vector<byte> right) => AddWideLower(left, right);
+
+    ///
+    /// svuint32_t svaddwb[_u32](svuint32_t op1, svuint16_t op2)
+    ///   UADDWB Zresult.S, Zop1.S, Zop2.H
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FM_3A   UADDWB <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_uaddwb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S);
+    ///
+    public static unsafe Vector<uint> AddWideLower(Vector<uint> left, Vector<ushort> right) => AddWideLower(left, right);
+
+    ///
+    /// svuint64_t svaddwb[_u64](svuint64_t op1, svuint32_t op2)
+    ///   UADDWB Zresult.D, Zop1.D, Zop2.S
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FM_3A   UADDWB <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_uaddwb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S);
+    ///
+    public static unsafe Vector<ulong> AddWideLower(Vector<ulong> left, Vector<uint> right) => AddWideLower(left, right);
+
+
+    /// AddWideUpper : Add wide (top)
+
+    ///
+    /// svint16_t svaddwt[_s16](svint16_t op1, svint8_t op2)
+    ///   SADDWT Zresult.H, Zop1.H, Zop2.B
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FM_3A   SADDWT <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_saddwt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S);
+    ///
+    public static unsafe Vector<short> AddWideUpper(Vector<short> left, Vector<sbyte> right) => AddWideUpper(left, right);
+
+    ///
+    /// svint32_t svaddwt[_s32](svint32_t op1, svint16_t op2)
+    ///   SADDWT Zresult.S, Zop1.S, Zop2.H
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FM_3A   SADDWT <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_saddwt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S);
+    ///
+    public static unsafe Vector<int> AddWideUpper(Vector<int> left, Vector<short> right) => AddWideUpper(left, right);
+
+    ///
+    /// svint64_t svaddwt[_s64](svint64_t op1, svint32_t op2)
+    ///   SADDWT Zresult.D, Zop1.D, Zop2.S
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FM_3A   SADDWT <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_saddwt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S);
+    ///
+    public static unsafe Vector<long> AddWideUpper(Vector<long> left, Vector<int> right) => AddWideUpper(left, right);
+
+    ///
+    /// svuint16_t svaddwt[_u16](svuint16_t op1, svuint8_t op2)
+    ///   UADDWT Zresult.H, Zop1.H, Zop2.B
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FM_3A   UADDWT <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_uaddwt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<ushort> AddWideUpper(Vector<ushort> left, Vector<byte> right) => AddWideUpper(left, right);
+
+    ///
+    /// svuint32_t svaddwt[_u32](svuint32_t op1, svuint16_t op2)
+    ///   UADDWT Zresult.S, Zop1.S, Zop2.H
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FM_3A   UADDWT <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_uaddwt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<uint> AddWideUpper(Vector<uint> left, Vector<ushort> right) => AddWideUpper(left, right);
+
+    ///
+    /// svuint64_t svaddwt[_u64](svuint64_t op1, svuint32_t op2)
+    ///   UADDWT Zresult.D, Zop1.D, Zop2.S
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FM_3A   UADDWT <Zd>.<T>, <Zn>.<T>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_uaddwt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<ulong> AddWideUpper(Vector<ulong> left, Vector<uint> right) => AddWideUpper(left, right);
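+    // Illustrative usage sketch, not part of the generated surface: AddWideLower/AddWideUpper
+    // widen the even-numbered (bottom) or odd-numbered (top) elements of the narrow operand
+    // and add them to the wide operand. Assuming an Sve2 class name (hypothetical here) and
+    // operands 'wide' (Vector<ushort>) and 'narrow' (Vector<byte>):
+    //
+    //     Vector<ushort> even = Sve2.AddWideLower(wide, narrow);   // wide + widen(narrow[0], narrow[2], ...)
+    //     Vector<ushort> odd  = Sve2.AddWideUpper(wide, narrow);   // wide + widen(narrow[1], narrow[3], ...)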
+
+
+    /// AddWideningLower : Add long (bottom)
+
+    ///
+    /// svint16_t svaddlb[_s16](svint8_t op1, svint8_t op2)
+    ///   SADDLB Zresult.H, Zop1.B, Zop2.B
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FL_3A   SADDLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_saddlb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<short> AddWideningLower(Vector<sbyte> left, Vector<sbyte> right) => AddWideningLower(left, right);
+
+    ///
+    /// svint32_t svaddlb[_s32](svint16_t op1, svint16_t op2)
+    ///   SADDLB Zresult.S, Zop1.H, Zop2.H
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FL_3A   SADDLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_saddlb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<int> AddWideningLower(Vector<short> left, Vector<short> right) => AddWideningLower(left, right);
+
+    ///
+    /// svint64_t svaddlb[_s64](svint32_t op1, svint32_t op2)
+    ///   SADDLB Zresult.D, Zop1.S, Zop2.S
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FL_3A   SADDLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_saddlb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<long> AddWideningLower(Vector<int> left, Vector<int> right) => AddWideningLower(left, right);
+
+    ///
+    /// svuint16_t svaddlb[_u16](svuint8_t op1, svuint8_t op2)
+    ///   UADDLB Zresult.H, Zop1.B, Zop2.B
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FL_3A   UADDLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_uaddlb, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<ushort> AddWideningLower(Vector<byte> left, Vector<byte> right) => AddWideningLower(left, right);
+
+    ///
+    /// svuint32_t svaddlb[_u32](svuint16_t op1, svuint16_t op2)
+    ///   UADDLB Zresult.S, Zop1.H, Zop2.H
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FL_3A   UADDLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_uaddlb, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<uint> AddWideningLower(Vector<ushort> left, Vector<ushort> right) => AddWideningLower(left, right);
+
+    ///
+    /// svuint64_t svaddlb[_u64](svuint32_t op1, svuint32_t op2)
+    ///   UADDLB Zresult.D, Zop1.S, Zop2.S
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FL_3A   UADDLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_uaddlb, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<ulong> AddWideningLower(Vector<uint> left, Vector<uint> right) => AddWideningLower(left, right);
+
+
+    /// AddWideningLowerUpper : Add long (bottom + top)
+
+    ///
+    /// svint16_t svaddlbt[_s16](svint8_t op1, svint8_t op2)
+    ///   SADDLBT Zresult.H, Zop1.B, Zop2.B
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FS_3A   SADDLBT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_saddlbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H);
+    ///
+    public static unsafe Vector<short> AddWideningLowerUpper(Vector<sbyte> left, Vector<sbyte> right) => AddWideningLowerUpper(left, right);
+
+    ///
+    /// svint32_t svaddlbt[_s32](svint16_t op1, svint16_t op2)
+    ///   SADDLBT Zresult.S, Zop1.H, Zop2.H
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FS_3A   SADDLBT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ /// theEmitter->emitIns_R_R_R(INS_sve_saddlbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddWideningLowerUpper(Vector left, Vector right) => AddWideningLowerUpper(left, right); + + /// + /// svint64_t svaddlbt[_s64](svint32_t op1, svint32_t op2) + /// SADDLBT Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FS_3A SADDLBT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_saddlbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddWideningLowerUpper(Vector left, Vector right) => AddWideningLowerUpper(left, right); + + + /// AddWideningUpper : Add long (top) + + /// + /// svint16_t svaddlt[_s16](svint8_t op1, svint8_t op2) + /// SADDLT Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SADDLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_saddlt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddWideningUpper(Vector left, Vector right) => AddWideningUpper(left, right); + + /// + /// svint32_t svaddlt[_s32](svint16_t op1, svint16_t op2) + /// SADDLT Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SADDLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_saddlt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddWideningUpper(Vector left, Vector right) => AddWideningUpper(left, right); + + /// + /// svint64_t svaddlt[_s64](svint32_t op1, svint32_t op2) + /// SADDLT Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SADDLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_saddlt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddWideningUpper(Vector left, Vector right) => AddWideningUpper(left, right); + + /// + /// svuint16_t svaddlt[_u16](svuint8_t op1, svuint8_t op2) + /// UADDLT Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FL_3A UADDLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uaddlt, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddWideningUpper(Vector left, Vector right) => AddWideningUpper(left, right); + + /// + /// svuint32_t svaddlt[_u32](svuint16_t op1, svuint16_t op2) + /// UADDLT Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FL_3A UADDLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uaddlt, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddWideningUpper(Vector left, Vector right) => AddWideningUpper(left, right); + + /// + /// svuint64_t svaddlt[_u64](svuint32_t op1, svuint32_t op2) + /// UADDLT Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FL_3A UADDLT ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uaddlt, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector AddWideningUpper(Vector left, Vector right) => AddWideningUpper(left, right); + + + /// BitwiseClearXor : Bitwise clear and exclusive OR + + /// + /// svint8_t svbcax[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + /// + /// svint16_t svbcax[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + /// + /// svint32_t svbcax[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + /// + /// svint64_t svbcax[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + /// + /// svuint8_t svbcax[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + /// + /// svuint16_t svbcax[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + /// + /// svuint32_t svbcax[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, 
.D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + /// + /// svuint64_t svbcax[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// BCAX Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BCAX Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BCAX .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bcax, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask) => BitwiseClearXor(xor, value, mask); + + + /// BitwiseSelect : Bitwise select + + /// + /// svint8_t svbsl[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + /// + /// svint16_t svbsl[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + /// + /// svint32_t svbsl[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + /// + /// svint64_t svbsl[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + /// + /// svuint8_t svbsl[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + /// + /// svuint16_t svbsl[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL .D, .D, .D, .D + /// 
theEmitter->emitIns_R_R_R(INS_sve_bsl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + /// + /// svuint32_t svbsl[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + /// + /// svuint64_t svbsl[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// BSL Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right) => BitwiseSelect(select, left, right); + + + + /// BitwiseSelectLeftInverted : Bitwise select with first input inverted + + /// + /// svint8_t svbsl1n[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL1N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl1n, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + /// + /// svint16_t svbsl1n[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL1N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl1n, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + /// + /// svint32_t svbsl1n[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL1N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl1n, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + /// + /// svint64_t svbsl1n[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL1N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl1n, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + /// + /// svuint8_t svbsl1n[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// BSL1N Ztied1.D, 
Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL1N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl1n, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + /// + /// svuint16_t svbsl1n[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL1N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl1n, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + /// + /// svuint32_t svbsl1n[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL1N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl1n, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + /// + /// svuint64_t svbsl1n[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// BSL1N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL1N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL1N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl1n, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right) => BitwiseSelectLeftInverted(select, left, right); + + + /// BitwiseSelectRightInverted : Bitwise select with second input inverted + + /// + /// svint8_t svbsl2n[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL2N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl2n, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + /// + /// svint16_t svbsl2n[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL2N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl2n, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + /// + /// svint32_t svbsl2n[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL2N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl2n, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 
INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + /// + /// svint64_t svbsl2n[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL2N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl2n, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + /// + /// svuint8_t svbsl2n[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL2N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl2n, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + /// + /// svuint16_t svbsl2n[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL2N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl2n, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + /// + /// svuint32_t svbsl2n[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL2N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl2n, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + /// + /// svuint64_t svbsl2n[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// BSL2N Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; BSL2N Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A BSL2N .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_bsl2n, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right) => BitwiseSelectRightInverted(select, left, right); + + + /// CountMatchingElements : Count matching elements + + /// + /// svuint32_t svhistcnt[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// HISTCNT Zresult.S, Pg/Z, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GI_4A HISTCNT ., /Z, ., . 
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_histcnt, EA_SCALABLE, REG_V0, REG_P0, REG_V1, REG_V2, INS_OPTS_SCALABLE_S);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_histcnt, EA_SCALABLE, REG_V3, REG_P7, REG_V4, REG_V5, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<uint> CountMatchingElements(Vector<int> mask, Vector<int> left, Vector<int> right) => CountMatchingElements(mask, left, right);
+
+    ///
+    /// svuint32_t svhistcnt[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2)
+    ///   HISTCNT Zresult.S, Pg/Z, Zop1.S, Zop2.S
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_GI_4A   HISTCNT <Zd>.<T>, <Pg>/Z, <Zn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_histcnt, EA_SCALABLE, REG_V0, REG_P0, REG_V1, REG_V2, INS_OPTS_SCALABLE_S);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_histcnt, EA_SCALABLE, REG_V3, REG_P7, REG_V4, REG_V5, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<uint> CountMatchingElements(Vector<uint> mask, Vector<uint> left, Vector<uint> right) => CountMatchingElements(mask, left, right);
+
+    ///
+    /// svuint64_t svhistcnt[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+    ///   HISTCNT Zresult.D, Pg/Z, Zop1.D, Zop2.D
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_GI_4A   HISTCNT <Zd>.<T>, <Pg>/Z, <Zn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_histcnt, EA_SCALABLE, REG_V0, REG_P0, REG_V1, REG_V2, INS_OPTS_SCALABLE_S);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_histcnt, EA_SCALABLE, REG_V3, REG_P7, REG_V4, REG_V5, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<ulong> CountMatchingElements(Vector<long> mask, Vector<long> left, Vector<long> right) => CountMatchingElements(mask, left, right);
+
+    ///
+    /// svuint64_t svhistcnt[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2)
+    ///   HISTCNT Zresult.D, Pg/Z, Zop1.D, Zop2.D
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_GI_4A   HISTCNT <Zd>.<T>, <Pg>/Z, <Zn>.<T>, <Zm>.<T>
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_histcnt, EA_SCALABLE, REG_V0, REG_P0, REG_V1, REG_V2, INS_OPTS_SCALABLE_S);
+    ///        theEmitter->emitIns_R_R_R_R(INS_sve_histcnt, EA_SCALABLE, REG_V3, REG_P7, REG_V4, REG_V5, INS_OPTS_SCALABLE_D);
+    ///
+    public static unsafe Vector<ulong> CountMatchingElements(Vector<ulong> mask, Vector<ulong> left, Vector<ulong> right) => CountMatchingElements(mask, left, right);
+
+
+    /// CountMatchingElementsIn128BitSegments : Count matching elements in 128-bit segments
+
+    ///
+    /// svuint8_t svhistseg[_s8](svint8_t op1, svint8_t op2)
+    ///   HISTSEG Zresult.B, Zop1.B, Zop2.B
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_GF_3A   HISTSEG <Zd>.B, <Zn>.B, <Zm>.B
+    ///        theEmitter->emitIns_R_R_R(INS_sve_histseg, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+    ///
+    public static unsafe Vector<byte> CountMatchingElementsIn128BitSegments(Vector<sbyte> left, Vector<sbyte> right) => CountMatchingElementsIn128BitSegments(left, right);
+
+    ///
+    /// svuint8_t svhistseg[_u8](svuint8_t op1, svuint8_t op2)
+    ///   HISTSEG Zresult.B, Zop1.B, Zop2.B
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_GF_3A   HISTSEG <Zd>.B, <Zn>.B, <Zm>.B
+    ///        theEmitter->emitIns_R_R_R(INS_sve_histseg, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+    ///
+    public static unsafe Vector<byte> CountMatchingElementsIn128BitSegments(Vector<byte> left, Vector<byte> right) => CountMatchingElementsIn128BitSegments(left, right);
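+    // Illustrative usage sketch, not part of the generated surface: the HISTCNT form
+    // produces, for each active element of 'left', a count of matching elements in
+    // 'right', while the HISTSEG form counts byte matches within each 128-bit segment.
+    // Assuming an Sve2 class name (hypothetical here) and suitably typed operands:
+    //
+    //     Vector<uint> counts = Sve2.CountMatchingElements(mask, left, right);
+    //     Vector<byte> perSeg = Sve2.CountMatchingElementsIn128BitSegments(a, b);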
+
+
+    /// CreateWhileGreaterThanMask : While decrementing scalar is greater than
+
+    ///
+    /// svbool_t svwhilegt_b8[_s32](int32_t op1, int32_t op2)
+    ///   WHILEGT Presult.B, Wop1, Wop2
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_DT_3A   WHILEGT <Pd>.<T>, <R><n>, <R><m>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_4BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_B);
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_DX_3A   WHILEGT {<Pd1>.<T>, <Pd2>.<T>}, <Xn>, <Xm>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR);
+    ///    IF_SVE_DY_3A   WHILEGT <PNd>.<T>, <Xn>, <Xm>, <vl>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X);
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X);
+    ///
+    public static unsafe Vector<byte> CreateWhileGreaterThanMask(int left, int right) => CreateWhileGreaterThanMask(left, right);
+
+    ///
+    /// svbool_t svwhilegt_b8[_s64](int64_t op1, int64_t op2)
+    ///   WHILEGT Presult.B, Xop1, Xop2
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_DT_3A   WHILEGT <Pd>.<T>, <R><n>, <R><m>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_4BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_B);
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_DX_3A   WHILEGT {<Pd1>.<T>, <Pd2>.<T>}, <Xn>, <Xm>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR);
+    ///    IF_SVE_DY_3A   WHILEGT <PNd>.<T>, <Xn>, <Xm>, <vl>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X);
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X);
+    ///
+    public static unsafe Vector<byte> CreateWhileGreaterThanMask(long left, long right) => CreateWhileGreaterThanMask(left, right);
+
+    ///
+    /// svbool_t svwhilegt_b8[_u32](uint32_t op1, uint32_t op2)
+    ///   WHILEHI Presult.B, Wop1, Wop2
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_DT_3A   WHILEHI <Pd>.<T>, <R><n>, <R><m>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_4BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_H);
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H);
+    ///    IF_SVE_DX_3A   WHILEHI {<Pd1>.<T>, <Pd2>.<T>}, <Xn>, <Xm>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR);
+    ///    IF_SVE_DY_3A   WHILEHI <PNd>.<T>, <Xn>, <Xm>, <vl>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X);
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X);
+    ///
+    public static unsafe Vector<byte> CreateWhileGreaterThanMask(uint left, uint right) => CreateWhileGreaterThanMask(left, right);
+
+    ///
+    /// svbool_t svwhilegt_b8[_u64](uint64_t op1, uint64_t op2)
+    ///   WHILEHI Presult.B, Xop1, Xop2
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_DT_3A   WHILEHI <Pd>.<T>, <R><n>, <R><m>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_4BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_H);
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H);
+    ///    IF_SVE_DX_3A   WHILEHI {<Pd1>.<T>, <Pd2>.<T>}, <Xn>, <Xm>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR);
+    ///    IF_SVE_DY_3A   WHILEHI <PNd>.<T>, <Xn>, <Xm>, <vl>
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X);
+    ///        theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X);
+    ///
+    public static unsafe Vector<byte> CreateWhileGreaterThanMask(ulong left, ulong right) =>
CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b16[_s32](int32_t op1, int32_t op2) + /// WHILEGT Presult.H, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEGT ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_4BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_B); + /// IF_SVE_DX_3A WHILEGT {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEGT ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b16[_s64](int64_t op1, int64_t op2) + /// WHILEGT Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEGT ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_4BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_B); + /// IF_SVE_DX_3A WHILEGT {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEGT ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b16[_u32](uint32_t op1, uint32_t op2) + /// WHILEHI Presult.H, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEHI ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_4BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// IF_SVE_DX_3A WHILEHI {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEHI ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b16[_u64](uint64_t op1, uint64_t op2) + /// WHILEHI Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEHI ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_4BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// IF_SVE_DX_3A WHILEHI {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, 
INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEHI ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b32[_s32](int32_t op1, int32_t op2) + /// WHILEGT Presult.S, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEGT ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_4BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_B); + /// IF_SVE_DX_3A WHILEGT {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEGT ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b32[_s64](int64_t op1, int64_t op2) + /// WHILEGT Presult.S, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEGT ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_4BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_B); + /// IF_SVE_DX_3A WHILEGT {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEGT ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b32[_u32](uint32_t op1, uint32_t op2) + /// WHILEHI Presult.S, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEHI ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_4BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// IF_SVE_DX_3A WHILEHI {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEHI ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b32[_u64](uint64_t op1, uint64_t op2) + /// WHILEHI Presult.S, Xop1, Xop2 + /// + /// 
codegenarm64test: + /// IF_SVE_DT_3A WHILEHI ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_4BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// IF_SVE_DX_3A WHILEHI {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEHI ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b64[_s32](int32_t op1, int32_t op2) + /// WHILEGT Presult.D, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEGT ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_4BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_B); + /// IF_SVE_DX_3A WHILEGT {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEGT ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanMask(int left, int right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b64[_s64](int64_t op1, int64_t op2) + /// WHILEGT Presult.D, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEGT ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_4BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_B); + /// IF_SVE_DX_3A WHILEGT {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEGT ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P10, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilegt, EA_8BYTE, REG_P11, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanMask(long left, long right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b64[_u32](uint32_t op1, uint32_t op2) + /// WHILEHI Presult.D, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEHI ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_4BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// IF_SVE_DX_3A WHILEHI {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEHI ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, 
INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanMask(uint left, uint right) => CreateWhileGreaterThanMask(left, right); + + /// + /// svbool_t svwhilegt_b64[_u64](uint64_t op1, uint64_t op2) + /// WHILEHI Presult.D, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEHI ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_4BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// IF_SVE_DX_3A WHILEHI {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEHI ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P12, REG_R8, REG_R9, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehi, EA_8BYTE, REG_P13, REG_R10, REG_R11, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanMask(ulong left, ulong right) => CreateWhileGreaterThanMask(left, right); + + + /// CreateWhileGreaterThanOrEqualMask : While decrementing scalar is greater than or equal to + + /// + /// svbool_t svwhilege_b8[_s32](int32_t op1, int32_t op2) + /// WHILEGE Presult.B, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEGE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_4BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B); + /// IF_SVE_DX_3A WHILEGE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEGE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b8[_s64](int64_t op1, int64_t op2) + /// WHILEGE Presult.B, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEGE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_4BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B); + /// IF_SVE_DX_3A WHILEGE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEGE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b8[_u32](uint32_t op1, uint32_t op2) + /// WHILEHS Presult.B, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEHS ., , + /// 
theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_4BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_H); + /// IF_SVE_DX_3A WHILEHS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEHS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b8[_u64](uint64_t op1, uint64_t op2) + /// WHILEHS Presult.B, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEHS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_4BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_H); + /// IF_SVE_DX_3A WHILEHS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEHS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b16[_s32](int32_t op1, int32_t op2) + /// WHILEGE Presult.H, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEGE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_4BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B); + /// IF_SVE_DX_3A WHILEGE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEGE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b16[_s64](int64_t op1, int64_t op2) + /// WHILEGE Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEGE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_4BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B); + /// IF_SVE_DX_3A WHILEGE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEGE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, 
INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b16[_u32](uint32_t op1, uint32_t op2) + /// WHILEHS Presult.H, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEHS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_4BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_H); + /// IF_SVE_DX_3A WHILEHS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEHS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b16[_u64](uint64_t op1, uint64_t op2) + /// WHILEHS Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEHS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_4BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_H); + /// IF_SVE_DX_3A WHILEHS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEHS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b32[_s32](int32_t op1, int32_t op2) + /// WHILEGE Presult.S, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEGE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_4BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B); + /// IF_SVE_DX_3A WHILEGE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEGE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b32[_s64](int64_t op1, int64_t op2) + /// WHILEGE Presult.S, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEGE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_4BYTE, REG_P0, REG_R0, 
REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B); + /// IF_SVE_DX_3A WHILEGE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEGE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b32[_u32](uint32_t op1, uint32_t op2) + /// WHILEHS Presult.S, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEHS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_4BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_H); + /// IF_SVE_DX_3A WHILEHS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEHS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b32[_u64](uint64_t op1, uint64_t op2) + /// WHILEHS Presult.S, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEHS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_4BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_H); + /// IF_SVE_DX_3A WHILEHS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEHS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b64[_s32](int32_t op1, int32_t op2) + /// WHILEGE Presult.D, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEGE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_4BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B); + /// IF_SVE_DX_3A WHILEGE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEGE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, 
REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(int left, int right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b64[_s64](int64_t op1, int64_t op2) + /// WHILEGE Presult.D, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEGE ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_4BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_B); + /// IF_SVE_DX_3A WHILEGE {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEGE ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P8, REG_R0, REG_R1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilege, EA_8BYTE, REG_P9, REG_R2, REG_R3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(long left, long right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b64[_u32](uint32_t op1, uint32_t op2) + /// WHILEHS Presult.D, Wop1, Wop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEHS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_4BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_H); + /// IF_SVE_DX_3A WHILEHS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEHS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(uint left, uint right) => CreateWhileGreaterThanOrEqualMask(left, right); + + /// + /// svbool_t svwhilege_b64[_u64](uint64_t op1, uint64_t op2) + /// WHILEHS Presult.D, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DT_3A WHILEHS ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_4BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_H); + /// IF_SVE_DX_3A WHILEHS {., .}, , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_PREDICATE_PAIR); + /// IF_SVE_DY_3A WHILEHS ., , , + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P14, REG_R12, REG_R13, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R_R(INS_sve_whilehs, EA_8BYTE, REG_P15, REG_R14, REG_R15, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe Vector CreateWhileGreaterThanOrEqualMask(ulong left, ulong right) => CreateWhileGreaterThanOrEqualMask(left, right); + + + /// CreateWhileReadAfterWriteMask : While free of read-after-write conflicts + + /// + /// svbool_t svwhilerw[_s8](const int8_t *op1, const int8_t *op2) + /// WHILERW Presult.B, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILERW ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P0, 
REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(sbyte* left, sbyte* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_s16](const int16_t *op1, const int16_t *op2) + /// WHILERW Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILERW ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(short* left, short* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_s32](const int32_t *op1, const int32_t *op2) + /// WHILERW Presult.S, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILERW ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(int* left, int* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_s64](const int64_t *op1, const int64_t *op2) + /// WHILERW Presult.D, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILERW ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(long* left, long* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_u8](const uint8_t *op1, const uint8_t *op2) + /// WHILERW Presult.B, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILERW ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(byte* left, byte* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_u16](const uint16_t *op1, const uint16_t *op2) + /// WHILERW Presult.H, Xop1, Xop2 + /// 
+ /// codegenarm64test: + /// IF_SVE_DU_3A WHILERW ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(ushort* left, ushort* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_u32](const uint32_t *op1, const uint32_t *op2) + /// WHILERW Presult.S, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILERW ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(uint* left, uint* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_u64](const uint64_t *op1, const uint64_t *op2) + /// WHILERW Presult.D, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILERW ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(ulong* left, ulong* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_f32](const float32_t *op1, const float32_t *op2) + /// WHILERW Presult.S, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILERW ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(float* left, float* right) => CreateWhileReadAfterWriteMask(left, right); + + /// + /// svbool_t svwhilerw[_f64](const float64_t *op1, const float64_t *op2) + /// WHILERW Presult.D, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILERW ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(double* left, double* right) => 
CreateWhileReadAfterWriteMask(left, right); + + + /// CreateWhileWriteAfterReadMask : While free of write-after-read conflicts + + /// + /// svbool_t svwhilewr[_s8](const int8_t *op1, const int8_t *op2) + /// WHILEWR Presult.B, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILEWR ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(sbyte* left, sbyte* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_s16](const int16_t *op1, const int16_t *op2) + /// WHILEWR Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILEWR ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(short* left, short* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_s32](const int32_t *op1, const int32_t *op2) + /// WHILEWR Presult.S, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILEWR ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(int* left, int* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_s64](const int64_t *op1, const int64_t *op2) + /// WHILEWR Presult.D, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILEWR ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(long* left, long* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_u8](const uint8_t *op1, const uint8_t *op2) + /// WHILEWR Presult.B, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILEWR ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P6, REG_R12, 
REG_R13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(byte* left, byte* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_u16](const uint16_t *op1, const uint16_t *op2) + /// WHILEWR Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILEWR ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(ushort* left, ushort* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_u32](const uint32_t *op1, const uint32_t *op2) + /// WHILEWR Presult.S, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILEWR ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(uint* left, uint* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_u64](const uint64_t *op1, const uint64_t *op2) + /// WHILEWR Presult.D, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILEWR ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(ulong* left, ulong* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_f32](const float32_t *op1, const float32_t *op2) + /// WHILEWR Presult.S, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILEWR ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(float* left, float* right) => CreateWhileWriteAfterReadMask(left, right); + + /// + /// svbool_t svwhilewr[_f64](const float64_t *op1, const float64_t *op2) + /// WHILEWR Presult.D, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILEWR ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_B); + /// 
theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(double* left, double* right) => CreateWhileWriteAfterReadMask(left, right); + + + /// DotProductComplex : Complex dot product + + /// + /// svint32_t svcdot[_s32](svint32_t op1, svint8_t op2, svint8_t op3, uint64_t imm_rotation) + /// CDOT Ztied1.S, Zop2.B, Zop3.B, #imm_rotation + /// MOVPRFX Zresult, Zop1; CDOT Zresult.S, Zop2.B, Zop3.B, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EJ_3A CDOT ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FA_3A CDOT .S, .B, .B[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_B); + /// IF_SVE_FA_3B CDOT .D, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation) => DotProductComplex(op1, op2, op3, rotation); + + /// + /// svint32_t svcdot_lane[_s32](svint32_t op1, svint8_t op2, svint8_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// CDOT Ztied1.S, Zop2.B, Zop3.B[imm_index], #imm_rotation + /// MOVPRFX Zresult, Zop1; CDOT Zresult.S, Zop2.B, Zop3.B[imm_index], #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EJ_3A CDOT ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FA_3A CDOT .S, .B, .B[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V4, 
REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_B); + /// IF_SVE_FA_3B CDOT .D, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation) => DotProductComplex(op1, op2, op3, imm_index, rotation); + + /// + /// svint64_t svcdot[_s64](svint64_t op1, svint16_t op2, svint16_t op3, uint64_t imm_rotation) + /// CDOT Ztied1.D, Zop2.H, Zop3.H, #imm_rotation + /// MOVPRFX Zresult, Zop1; CDOT Zresult.D, Zop2.H, Zop3.H, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EJ_3A CDOT ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FA_3A CDOT .S, .B, .B[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_B); + /// IF_SVE_FA_3B CDOT .D, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation) => DotProductComplex(op1, op2, op3, rotation); + + /// + /// svint64_t svcdot_lane[_s64](svint64_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// CDOT Ztied1.D, Zop2.H, Zop3.H[imm_index], #imm_rotation + /// MOVPRFX Zresult, Zop1; CDOT Zresult.D, Zop2.H, Zop3.H[imm_index], #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EJ_3A CDOT ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cdot, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, 
INS_OPTS_SCALABLE_D); + /// IF_SVE_FA_3A CDOT .S, .B, .B[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_B); + /// IF_SVE_FA_3B CDOT .D, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cdot, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector DotProductComplex(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation) => DotProductComplex(op1, op2, op3, imm_index, rotation); + + + /// DownConvertNarrowingUpper : Down convert and narrow (top) + + /// + /// svfloat32_t svcvtnt_f32[_f64]_m(svfloat32_t even, svbool_t pg, svfloat64_t op) + /// FCVTNT Ztied.S, Pg/M, Zop.D + /// svfloat32_t svcvtnt_f32[_f64]_x(svfloat32_t even, svbool_t pg, svfloat64_t op) + /// FCVTNT Ztied.S, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_GQ_3A FCVTNT .H, /M, .S + /// theEmitter->emitIns_R_R_R(INS_sve_fcvtnt, EA_SCALABLE, REG_V18, REG_P3, REG_V9, INS_OPTS_S_TO_H); + /// theEmitter->emitIns_R_R_R(INS_sve_fcvtnt, EA_SCALABLE, REG_V12, REG_P3, REG_V5, INS_OPTS_D_TO_S); + /// IF_SVE_HG_2A FCVTNT .B, {.S-.S } + /// theEmitter->emitIns_R_R(INS_sve_fcvtnt, EA_SCALABLE, REG_V14, REG_V15); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector DownConvertNarrowingUpper(Vector value) => DownConvertNarrowingUpper(value); + + + /// DownConvertRoundingOdd : Down convert, rounding to odd + + /// + /// svfloat32_t svcvtx_f32[_f64]_m(svfloat32_t inactive, svbool_t pg, svfloat64_t op) + /// FCVTX Ztied.S, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; FCVTX Zresult.S, Pg/M, Zop.D + /// svfloat32_t svcvtx_f32[_f64]_x(svbool_t pg, svfloat64_t op) + /// FCVTX Ztied.S, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; FCVTX Zresult.S, Pg/M, Zop.D + /// svfloat32_t svcvtx_f32[_f64]_z(svbool_t pg, svfloat64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTX Zresult.S, Pg/M, Zop.D + /// + /// codegenarm64test: + /// sve_fcvtx - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector DownConvertRoundingOdd(Vector value) => DownConvertRoundingOdd(value); + + + /// DownConvertRoundingOddUpper : Down convert, rounding to odd (top) + + /// + /// svfloat32_t svcvtxnt_f32[_f64]_m(svfloat32_t even, svbool_t pg, svfloat64_t op) + /// FCVTXNT Ztied.S, Pg/M, Zop.D + /// svfloat32_t svcvtxnt_f32[_f64]_x(svfloat32_t even, svbool_t pg, svfloat64_t op) + /// FCVTXNT Ztied.S, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_GQ_3A FCVTXNT .S, /M, .D + /// theEmitter->emitIns_R_R_R(INS_sve_fcvtxnt, EA_SCALABLE, REG_V1, REG_P2, REG_V3); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector DownConvertRoundingOddUpper(Vector value) => DownConvertRoundingOddUpper(value); + + + /// GatherVectorByteZeroExtendNonTemporal : Load 
8-bit data and zero-extend, non-temporal + + /// + /// svint32_t svldnt1ub_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// LDNT1B Zresult.S, Pg/Z, [Zbases.S, XZR] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -5, INS_OPTS_SCALABLE_B); + /// IF_SVE_IF_4A LDNT1B {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V3, REG_P2, REG_V1, REG_R0, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1B {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V4, REG_R3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorByteZeroExtendNonTemporal(mask, addresses); + + /// + /// svint32_t svldnt1ub_gather_[u32]offset_s32(svbool_t pg, const uint8_t *base, svuint32_t offsets) + /// LDNT1B Zresult.S, Pg/Z, [Zoffsets.S, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -5, INS_OPTS_SCALABLE_B); + /// IF_SVE_IF_4A LDNT1B {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V3, REG_P2, REG_V1, REG_R0, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1B {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V4, REG_R3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) => GatherVectorByteZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1ub_gather_[s64]offset_s64(svbool_t pg, const uint8_t *base, svint64_t offsets) + /// LDNT1B Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -5, INS_OPTS_SCALABLE_B); + /// IF_SVE_IF_4A LDNT1B {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V3, REG_P2, REG_V1, REG_R0, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1B {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V4, REG_R3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) => GatherVectorByteZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1ub_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// LDNT1B Zresult.D, Pg/Z, [Zbases.D, XZR] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -5, INS_OPTS_SCALABLE_B); + /// IF_SVE_IF_4A LDNT1B {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V3, REG_P2, REG_V1, REG_R0, 
INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1B {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V4, REG_R3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorByteZeroExtendNonTemporal(mask, addresses); + + /// + /// svint64_t svldnt1ub_gather_[u64]offset_s64(svbool_t pg, const uint8_t *base, svuint64_t offsets) + /// LDNT1B Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -5, INS_OPTS_SCALABLE_B); + /// IF_SVE_IF_4A LDNT1B {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V3, REG_P2, REG_V1, REG_R0, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1B {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V4, REG_R3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) => GatherVectorByteZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svuint32_t svldnt1ub_gather[_u32base]_u32(svbool_t pg, svuint32_t bases) + /// LDNT1B Zresult.S, Pg/Z, [Zbases.S, XZR] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -5, INS_OPTS_SCALABLE_B); + /// IF_SVE_IF_4A LDNT1B {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V3, REG_P2, REG_V1, REG_R0, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1B {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V4, REG_R3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorByteZeroExtendNonTemporal(mask, addresses); + + /// + /// svuint32_t svldnt1ub_gather_[u32]offset_u32(svbool_t pg, const uint8_t *base, svuint32_t offsets) + /// LDNT1B Zresult.S, Pg/Z, [Zoffsets.S, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -5, INS_OPTS_SCALABLE_B); + /// IF_SVE_IF_4A LDNT1B {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V3, REG_P2, REG_V1, REG_R0, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1B {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V4, REG_R3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) => GatherVectorByteZeroExtendNonTemporal(mask, address, offsets); + + /// 
+ /// svuint64_t svldnt1ub_gather_[s64]offset_u64(svbool_t pg, const uint8_t *base, svint64_t offsets) + /// LDNT1B Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -5, INS_OPTS_SCALABLE_B); + /// IF_SVE_IF_4A LDNT1B {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V3, REG_P2, REG_V1, REG_R0, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1B {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V4, REG_R3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) => GatherVectorByteZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1ub_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// LDNT1B Zresult.D, Pg/Z, [Zbases.D, XZR] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -5, INS_OPTS_SCALABLE_B); + /// IF_SVE_IF_4A LDNT1B {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V3, REG_P2, REG_V1, REG_R0, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1B {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V4, REG_R3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorByteZeroExtendNonTemporal(mask, addresses); + + /// + /// svuint64_t svldnt1ub_gather_[u64]offset_u64(svbool_t pg, const uint8_t *base, svuint64_t offsets) + /// LDNT1B Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1B {.B }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P1, REG_R2, -5, INS_OPTS_SCALABLE_B); + /// IF_SVE_IF_4A LDNT1B {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V3, REG_P2, REG_V1, REG_R0, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1B {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V4, REG_R3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1B {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1b, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector GatherVectorByteZeroExtendNonTemporal(Vector mask, byte* address, Vector offsets) => GatherVectorByteZeroExtendNonTemporal(mask, address, offsets); + + + /// GatherVectorInt16SignExtendNonTemporal : Load 16-bit data and sign-extend, non-temporal + + /// + /// svint32_t svldnt1sh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases) + /// LDNT1SH Zresult.S, Pg/Z, [Zbases.S, XZR] + /// + /// codegenarm64test: + /// IF_SVE_IF_4A LDNT1SH {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1sh, EA_SCALABLE, REG_V3, REG_P4, REG_V1, REG_R2, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1SH {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1sh, 
+
+        /// GatherVectorInt16SignExtendNonTemporal : Load 16-bit data and sign-extend, non-temporal
+
+        /// <summary>
+        /// svint32_t svldnt1sh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+        ///   LDNT1SH Zresult.S, Pg/Z, [Zbases.S, XZR]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IF_4A   LDNT1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1sh, EA_SCALABLE, REG_V3, REG_P4, REG_V1, REG_R2, INS_OPTS_SCALABLE_S);
+        ///    IF_SVE_IF_4A_A   LDNT1SH {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1sh, EA_SCALABLE, REG_V3, REG_P2, REG_V1, REG_R0, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorInt16SignExtendNonTemporal(Vector<int> mask, Vector<uint> addresses) => GatherVectorInt16SignExtendNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svint64_t svldnt1sh_gather_[s64]index_s64(svbool_t pg, const int16_t *base, svint64_t indices)
+        ///   LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SH forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt16SignExtendNonTemporal(Vector<long> mask, short* address, Vector<long> indices) => GatherVectorInt16SignExtendNonTemporal(mask, address, indices);
+
+        /// <summary>
+        /// svint64_t svldnt1sh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+        ///   LDNT1SH Zresult.D, Pg/Z, [Zbases.D, XZR]
+        ///
+        /// codegenarm64test: same LDNT1SH forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt16SignExtendNonTemporal(Vector<long> mask, Vector<ulong> addresses) => GatherVectorInt16SignExtendNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svint64_t svldnt1sh_gather_[u64]index_s64(svbool_t pg, const int16_t *base, svuint64_t indices)
+        ///   LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SH forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt16SignExtendNonTemporal(Vector<long> mask, short* address, Vector<ulong> indices) => GatherVectorInt16SignExtendNonTemporal(mask, address, indices);
+
+        /// <summary>
+        /// svuint32_t svldnt1sh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+        ///   LDNT1SH Zresult.S, Pg/Z, [Zbases.S, XZR]
+        ///
+        /// codegenarm64test: same LDNT1SH forms as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorInt16SignExtendNonTemporal(Vector<uint> mask, Vector<uint> addresses) => GatherVectorInt16SignExtendNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svuint64_t svldnt1sh_gather_[s64]index_u64(svbool_t pg, const int16_t *base, svint64_t indices)
+        ///   LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SH forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt16SignExtendNonTemporal(Vector<ulong> mask, short* address, Vector<long> indices) => GatherVectorInt16SignExtendNonTemporal(mask, address, indices);
+
+        /// <summary>
+        /// svuint64_t svldnt1sh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+        ///   LDNT1SH Zresult.D, Pg/Z, [Zbases.D, XZR]
+        ///
+        /// codegenarm64test: same LDNT1SH forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt16SignExtendNonTemporal(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorInt16SignExtendNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svuint64_t svldnt1sh_gather_[u64]index_u64(svbool_t pg, const int16_t *base, svuint64_t indices)
+        ///   LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SH forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt16SignExtendNonTemporal(Vector<ulong> mask, short* address, Vector<ulong> indices) => GatherVectorInt16SignExtendNonTemporal(mask, address, indices);
+
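The `indices` overloads above take element indices, not byte offsets: for 16-bit data, lane i loads from address + 2 * indices[i]. A hedged sketch under the same assumptions as the previous example (the stride of 3 is invented):

    // Gathers x[0], x[3], x[6], ... into long lanes, sign-extending each short.
    static unsafe Vector<long> GatherEveryThird(short* x)
    {
        long[] idx = new long[Vector<long>.Count];
        for (int i = 0; i < idx.Length; i++) idx[i] = 3 * i;   // element indices

        Vector<long> mask = Sve.CreateTrueMaskInt64();
        // LDNT1SH scales each index by sizeof(short) when forming the address.
        return Sve.GatherVectorInt16SignExtendNonTemporal(mask, x, new Vector<long>(idx));
    }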
+
+        /// GatherVectorInt16WithByteOffsetsSignExtendNonTemporal : Load 16-bit data and sign-extend, non-temporal
+
+        /// <summary>
+        /// svint32_t svldnt1sh_gather_[u32]offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+        ///   LDNT1SH Zresult.S, Pg/Z, [Zoffsets.S, Xbase]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IF_4A   LDNT1SH {<Zt>.S }, <Pg>/Z, [<Zn>.S{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1sh, EA_SCALABLE, REG_V3, REG_P4, REG_V1, REG_R2, INS_OPTS_SCALABLE_S);
+        ///    IF_SVE_IF_4A_A   LDNT1SH {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1sh, EA_SCALABLE, REG_V3, REG_P2, REG_V1, REG_R0, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<int> mask, short* address, Vector<uint> offsets) => GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svint64_t svldnt1sh_gather_[s64]offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets)
+        ///   LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SH forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<long> mask, short* address, Vector<long> offsets) => GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svint64_t svldnt1sh_gather_[u64]offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+        ///   LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SH forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<long> mask, short* address, Vector<ulong> offsets) => GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svuint32_t svldnt1sh_gather_[u32]offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets)
+        ///   LDNT1SH Zresult.S, Pg/Z, [Zoffsets.S, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SH forms as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<uint> mask, short* address, Vector<uint> offsets) => GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svldnt1sh_gather_[s64]offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets)
+        ///   LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SH forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<ulong> mask, short* address, Vector<long> offsets) => GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svldnt1sh_gather_[u64]offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets)
+        ///   LDNT1SH Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SH forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(Vector<ulong> mask, short* address, Vector<ulong> offsets) => GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
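By contrast, the `WithByteOffsets` overloads interpret the third operand as raw byte offsets, so a gather equivalent to the indexed sketch above must pre-scale by sizeof(short). Same assumptions as before:

    static unsafe Vector<long> GatherEveryThirdByByteOffset(short* x)
    {
        long[] off = new long[Vector<long>.Count];
        for (int i = 0; i < off.Length; i++) off[i] = 3 * i * sizeof(short); // bytes

        Vector<long> mask = Sve.CreateTrueMaskInt64();
        // No scaling happens here: lane i loads *(short*)((byte*)x + off[i]).
        return Sve.GatherVectorInt16WithByteOffsetsSignExtendNonTemporal(mask, x, new Vector<long>(off));
    }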
+
+        /// GatherVectorInt32SignExtendNonTemporal : Load 32-bit data and sign-extend, non-temporal
+
+        /// <summary>
+        /// svint64_t svldnt1sw_gather_[s64]index_s64(svbool_t pg, const int32_t *base, svint64_t indices)
+        ///   LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IX_4A   LDNT1SW {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1sw, EA_SCALABLE, REG_V7, REG_P1, REG_V0, REG_R1, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1sw, EA_SCALABLE, REG_V7, REG_P1, REG_V0, REG_ZR, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt32SignExtendNonTemporal(Vector<long> mask, int* address, Vector<long> indices) => GatherVectorInt32SignExtendNonTemporal(mask, address, indices);
+
+        /// <summary>
+        /// svint64_t svldnt1sw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+        ///   LDNT1SW Zresult.D, Pg/Z, [Zbases.D, XZR]
+        ///
+        /// codegenarm64test: same LDNT1SW forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt32SignExtendNonTemporal(Vector<long> mask, Vector<ulong> addresses) => GatherVectorInt32SignExtendNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svint64_t svldnt1sw_gather_[u64]index_s64(svbool_t pg, const int32_t *base, svuint64_t indices)
+        ///   LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SW forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt32SignExtendNonTemporal(Vector<long> mask, int* address, Vector<ulong> indices) => GatherVectorInt32SignExtendNonTemporal(mask, address, indices);
+
+        /// <summary>
+        /// svuint64_t svldnt1sw_gather_[s64]index_u64(svbool_t pg, const int32_t *base, svint64_t indices)
+        ///   LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SW forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt32SignExtendNonTemporal(Vector<ulong> mask, int* address, Vector<long> indices) => GatherVectorInt32SignExtendNonTemporal(mask, address, indices);
+
+        /// <summary>
+        /// svuint64_t svldnt1sw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+        ///   LDNT1SW Zresult.D, Pg/Z, [Zbases.D, XZR]
+        ///
+        /// codegenarm64test: same LDNT1SW forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt32SignExtendNonTemporal(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorInt32SignExtendNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svuint64_t svldnt1sw_gather_[u64]index_u64(svbool_t pg, const int32_t *base, svuint64_t indices)
+        ///   LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SW forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt32SignExtendNonTemporal(Vector<ulong> mask, int* address, Vector<ulong> indices) => GatherVectorInt32SignExtendNonTemporal(mask, address, indices);
+
+
+        /// GatherVectorInt32WithByteOffsetsSignExtendNonTemporal : Load 32-bit data and sign-extend, non-temporal
+
+        /// <summary>
+        /// svint64_t svldnt1sw_gather_[s64]offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets)
+        ///   LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IX_4A   LDNT1SW {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1sw, EA_SCALABLE, REG_V7, REG_P1, REG_V0, REG_R1, INS_OPTS_SCALABLE_D);
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1sw, EA_SCALABLE, REG_V7, REG_P1, REG_V0, REG_ZR, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector<long> mask, int* address, Vector<long> offsets) => GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svint64_t svldnt1sw_gather_[u64]offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets)
+        ///   LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SW forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector<long> mask, int* address, Vector<ulong> offsets) => GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svldnt1sw_gather_[s64]offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets)
+        ///   LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SW forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector<ulong> mask, int* address, Vector<long> offsets) => GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svldnt1sw_gather_[u64]offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets)
+        ///   LDNT1SW Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SW forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(Vector<ulong> mask, int* address, Vector<ulong> offsets) => GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(mask, address, offsets);
+
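The sign-extending 32-bit loads preserve negative values when widening into 64-bit lanes. A small sketch under the same assumptions (the offsets are invented):

    static unsafe Vector<long> WidenSigned(int* src)
    {
        // src[i] == -1 stays -1 (0xFFFFFFFF_FFFFFFFF) in the 64-bit lane; a
        // zero-extending load would instead produce 0x00000000_FFFFFFFF.
        long[] off = new long[Vector<long>.Count];
        for (int i = 0; i < off.Length; i++) off[i] = i * sizeof(int);   // byte offsets

        return Sve.GatherVectorInt32WithByteOffsetsSignExtendNonTemporal(
            Sve.CreateTrueMaskInt64(), src, new Vector<long>(off));
    }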
+
+        /// GatherVectorNonTemporal : Unextended load, non-temporal
+
+        /// <summary>
+        /// svint32_t svldnt1_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+        ///   LDNT1W Zresult.S, Pg/Z, [Zbases.S, XZR]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IM_3A   LDNT1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S);
+        ///    IF_SVE_IF_4A   LDNT1W {<Zt>.S }, <Pg>/Z, [<Zn>.S{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S);
+        ///    IF_SVE_IF_4A_A   LDNT1W {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_IN_4A   LDNT1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #2]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorNonTemporal(Vector<int> mask, Vector<uint> addresses) => GatherVectorNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svint32_t svldnt1_gather_[u32]offset[_s32](svbool_t pg, const int32_t *base, svuint32_t offsets)
+        ///   LDNT1W Zresult.S, Pg/Z, [Zoffsets.S, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1W forms as above
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorNonTemporal(Vector<int> mask, int* address, Vector<uint> offsets) => GatherVectorNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svint64_t svldnt1_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zbases.D, XZR]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IM_3A   LDNT1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1d, EA_SCALABLE, REG_V3, REG_P4, REG_R5, -1, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_IN_4A   LDNT1D {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #3]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1d, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        ///    IF_SVE_IX_4A   LDNT1D {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1d, EA_SCALABLE, REG_V4, REG_P2, REG_V1, REG_R3, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorNonTemporal(Vector<long> mask, Vector<ulong> addresses) => GatherVectorNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svint64_t svldnt1_gather_[s64]offset[_s64](svbool_t pg, const int64_t *base, svint64_t offsets)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1D forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorNonTemporal(Vector<long> mask, long* address, Vector<long> offsets) => GatherVectorNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svint64_t svldnt1_gather_[u64]offset[_s64](svbool_t pg, const int64_t *base, svuint64_t offsets)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1D forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorNonTemporal(Vector<long> mask, long* address, Vector<ulong> offsets) => GatherVectorNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svint64_t svldnt1_gather_[s64]index[_s64](svbool_t pg, const int64_t *base, svint64_t indices)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1D forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorNonTemporal(Vector<long> mask, long* address, Vector<long> indices) => GatherVectorNonTemporal(mask, address, indices);
+
+        /// <summary>
+        /// svint64_t svldnt1_gather_[u64]index[_s64](svbool_t pg, const int64_t *base, svuint64_t indices)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1D forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorNonTemporal(Vector<long> mask, long* address, Vector<ulong> indices) => GatherVectorNonTemporal(mask, address, indices);
+
+        /// <summary>
+        /// svuint32_t svldnt1_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+        ///   LDNT1W Zresult.S, Pg/Z, [Zbases.S, XZR]
+        ///
+        /// codegenarm64test: same LDNT1W forms as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorNonTemporal(Vector<uint> mask, Vector<uint> addresses) => GatherVectorNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svuint32_t svldnt1_gather_[u32]offset[_u32](svbool_t pg, const uint32_t *base, svuint32_t offsets)
+        ///   LDNT1W Zresult.S, Pg/Z, [Zoffsets.S, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1W forms as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorNonTemporal(Vector<uint> mask, uint* address, Vector<uint> offsets) => GatherVectorNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svldnt1_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zbases.D, XZR]
+        ///
+        /// codegenarm64test: same LDNT1D forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorNonTemporal(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svuint64_t svldnt1_gather_[s64]offset[_u64](svbool_t pg, const uint64_t *base, svint64_t offsets)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1D forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorNonTemporal(Vector<ulong> mask, ulong* address, Vector<long> offsets) => GatherVectorNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svldnt1_gather_[u64]offset[_u64](svbool_t pg, const uint64_t *base, svuint64_t offsets)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1D forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorNonTemporal(Vector<ulong> mask, ulong* address, Vector<ulong> offsets) => GatherVectorNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svldnt1_gather_[s64]index[_u64](svbool_t pg, const uint64_t *base, svint64_t indices)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1D forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorNonTemporal(Vector<ulong> mask, ulong* address, Vector<long> indices) => GatherVectorNonTemporal(mask, address, indices);
+
+        /// <summary>
+        /// svuint64_t svldnt1_gather_[u64]index[_u64](svbool_t pg, const uint64_t *base, svuint64_t indices)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1D forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorNonTemporal(Vector<ulong> mask, ulong* address, Vector<ulong> indices) => GatherVectorNonTemporal(mask, address, indices);
+
+        /// <summary>
+        /// svfloat32_t svldnt1_gather[_u32base]_f32(svbool_t pg, svuint32_t bases)
+        ///   LDNT1W Zresult.S, Pg/Z, [Zbases.S, XZR]
+        ///
+        /// codegenarm64test: same LDNT1W forms as above
+        /// </summary>
+        public static unsafe Vector<float> GatherVectorNonTemporal(Vector<float> mask, Vector<uint> addresses) => GatherVectorNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svfloat32_t svldnt1_gather_[u32]offset[_f32](svbool_t pg, const float32_t *base, svuint32_t offsets)
+        ///   LDNT1W Zresult.S, Pg/Z, [Zoffsets.S, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1W forms as above
+        /// </summary>
+        public static unsafe Vector<float> GatherVectorNonTemporal(Vector<float> mask, float* address, Vector<uint> offsets) => GatherVectorNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svfloat64_t svldnt1_gather_[s64]offset[_f64](svbool_t pg, const float64_t *base, svint64_t offsets)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1D forms as above
+        /// </summary>
+        public static unsafe Vector<double> GatherVectorNonTemporal(Vector<double> mask, double* address, Vector<long> offsets) => GatherVectorNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svfloat64_t svldnt1_gather_[s64]index[_f64](svbool_t pg, const float64_t *base, svint64_t indices)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1D forms as above
+        /// </summary>
+        public static unsafe Vector<double> GatherVectorNonTemporal(Vector<double> mask, double* address, Vector<long> indices) => GatherVectorNonTemporal(mask, address, indices);
+
+        /// <summary>
+        /// svfloat64_t svldnt1_gather[_u64base]_f64(svbool_t pg, svuint64_t bases)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zbases.D, XZR]
+        ///
+        /// codegenarm64test: same LDNT1D forms as above
+        /// </summary>
+        public static unsafe Vector<double> GatherVectorNonTemporal(Vector<double> mask, Vector<ulong> addresses) => GatherVectorNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svfloat64_t svldnt1_gather_[u64]offset[_f64](svbool_t pg, const float64_t *base, svuint64_t offsets)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1D forms as above
+        /// </summary>
+        public static unsafe Vector<double> GatherVectorNonTemporal(Vector<double> mask, double* address, Vector<ulong> offsets) => GatherVectorNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svfloat64_t svldnt1_gather_[u64]index[_f64](svbool_t pg, const float64_t *base, svuint64_t indices)
+        ///   LDNT1D Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1D forms as above
+        /// </summary>
+        public static unsafe Vector<double> GatherVectorNonTemporal(Vector<double> mask, double* address, Vector<ulong> indices) => GatherVectorNonTemporal(mask, address, indices);
+
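The unextended GatherVectorNonTemporal overloads load full-width elements; a typical use is a strided gather such as reading one matrix column. Sketch under the same assumptions (the row-major layout and parameter names are invented):

    static unsafe Vector<double> GatherColumn(double* matrix, int rowStride, int column)
    {
        // Lane i loads matrix[i * rowStride + column]; the LDNT1D hint tells
        // the core the gathered cache lines will not be reused soon.
        long[] idx = new long[Vector<double>.Count];
        for (int i = 0; i < idx.Length; i++) idx[i] = (long)i * rowStride + column;

        return Sve.GatherVectorNonTemporal(
            Sve.CreateTrueMaskDouble(), matrix, new Vector<long>(idx));
    }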
+
+        /// GatherVectorSByteSignExtendNonTemporal : Load 8-bit data and sign-extend, non-temporal
+
+        /// <summary>
+        /// svint32_t svldnt1sb_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+        ///   LDNT1SB Zresult.S, Pg/Z, [Zbases.S, XZR]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IF_4A   LDNT1SB {<Zt>.S }, <Pg>/Z, [<Zn>.S{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1sb, EA_SCALABLE, REG_V2, REG_P3, REG_V5, REG_R4, INS_OPTS_SCALABLE_S);
+        ///    IF_SVE_IF_4A_A   LDNT1SB {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1sb, EA_SCALABLE, REG_V2, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorSByteSignExtendNonTemporal(Vector<int> mask, Vector<uint> addresses) => GatherVectorSByteSignExtendNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svint32_t svldnt1sb_gather_[u32]offset_s32(svbool_t pg, const int8_t *base, svuint32_t offsets)
+        ///   LDNT1SB Zresult.S, Pg/Z, [Zoffsets.S, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SB forms as above
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorSByteSignExtendNonTemporal(Vector<int> mask, sbyte* address, Vector<uint> offsets) => GatherVectorSByteSignExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svint64_t svldnt1sb_gather_[s64]offset_s64(svbool_t pg, const int8_t *base, svint64_t offsets)
+        ///   LDNT1SB Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SB forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorSByteSignExtendNonTemporal(Vector<long> mask, sbyte* address, Vector<long> offsets) => GatherVectorSByteSignExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svint64_t svldnt1sb_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+        ///   LDNT1SB Zresult.D, Pg/Z, [Zbases.D, XZR]
+        ///
+        /// codegenarm64test: same LDNT1SB forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorSByteSignExtendNonTemporal(Vector<long> mask, Vector<ulong> addresses) => GatherVectorSByteSignExtendNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svint64_t svldnt1sb_gather_[u64]offset_s64(svbool_t pg, const int8_t *base, svuint64_t offsets)
+        ///   LDNT1SB Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SB forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorSByteSignExtendNonTemporal(Vector<long> mask, sbyte* address, Vector<ulong> offsets) => GatherVectorSByteSignExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svuint32_t svldnt1sb_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+        ///   LDNT1SB Zresult.S, Pg/Z, [Zbases.S, XZR]
+        ///
+        /// codegenarm64test: same LDNT1SB forms as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorSByteSignExtendNonTemporal(Vector<uint> mask, Vector<uint> addresses) => GatherVectorSByteSignExtendNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svuint32_t svldnt1sb_gather_[u32]offset_u32(svbool_t pg, const int8_t *base, svuint32_t offsets)
+        ///   LDNT1SB Zresult.S, Pg/Z, [Zoffsets.S, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SB forms as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorSByteSignExtendNonTemporal(Vector<uint> mask, sbyte* address, Vector<uint> offsets) => GatherVectorSByteSignExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svldnt1sb_gather_[s64]offset_u64(svbool_t pg, const int8_t *base, svint64_t offsets)
+        ///   LDNT1SB Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SB forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorSByteSignExtendNonTemporal(Vector<ulong> mask, sbyte* address, Vector<long> offsets) => GatherVectorSByteSignExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svldnt1sb_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+        ///   LDNT1SB Zresult.D, Pg/Z, [Zbases.D, XZR]
+        ///
+        /// codegenarm64test: same LDNT1SB forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorSByteSignExtendNonTemporal(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorSByteSignExtendNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svuint64_t svldnt1sb_gather_[u64]offset_u64(svbool_t pg, const int8_t *base, svuint64_t offsets)
+        ///   LDNT1SB Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1SB forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorSByteSignExtendNonTemporal(Vector<ulong> mask, sbyte* address, Vector<ulong> offsets) => GatherVectorSByteSignExtendNonTemporal(mask, address, offsets);
+
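Interpreting the same memory as signed or unsigned bytes is the only difference between this group and GatherVectorByteZeroExtendNonTemporal earlier: the raw byte 0x80 becomes -128 here and 128 there. A contrast sketch, same assumptions as before:

    static unsafe void ExtendContrast(byte* p, Vector<long> offsets)
    {
        // Suppose the byte at every gathered offset is 0x80.
        Vector<long> signedLanes =
            Sve.GatherVectorSByteSignExtendNonTemporal(Sve.CreateTrueMaskInt64(), (sbyte*)p, offsets);
        Vector<ulong> unsignedLanes =
            Sve.GatherVectorByteZeroExtendNonTemporal(Sve.CreateTrueMaskUInt64(), p, offsets);
        // signedLanes hold -128 (0xFFFF...FF80); unsignedLanes hold 128 (0x...0080).
    }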
+
+        /// GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal : Load 16-bit data and zero-extend, non-temporal
+
+        /// <summary>
+        /// svint32_t svldnt1uh_gather_[u32]offset_s32(svbool_t pg, const uint16_t *base, svuint32_t offsets)
+        ///   LDNT1H Zresult.S, Pg/Z, [Zoffsets.S, Xbase]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IM_3A   LDNT1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1h, EA_SCALABLE, REG_V6, REG_P7, REG_R8, 0, INS_OPTS_SCALABLE_H);
+        ///    IF_SVE_IF_4A   LDNT1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_V2, REG_R3, INS_OPTS_SCALABLE_S);
+        ///    IF_SVE_IF_4A_A   LDNT1H {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V1, REG_P4, REG_V3, REG_R2, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_IN_4A   LDNT1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector<int> mask, ushort* address, Vector<uint> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svint64_t svldnt1uh_gather_[s64]offset_s64(svbool_t pg, const uint16_t *base, svint64_t offsets)
+        ///   LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1H forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector<long> mask, ushort* address, Vector<long> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svint64_t svldnt1uh_gather_[u64]offset_s64(svbool_t pg, const uint16_t *base, svuint64_t offsets)
+        ///   LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1H forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector<long> mask, ushort* address, Vector<ulong> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svuint32_t svldnt1uh_gather_[u32]offset_u32(svbool_t pg, const uint16_t *base, svuint32_t offsets)
+        ///   LDNT1H Zresult.S, Pg/Z, [Zoffsets.S, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1H forms as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector<uint> mask, ushort* address, Vector<uint> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svldnt1uh_gather_[s64]offset_u64(svbool_t pg, const uint16_t *base, svint64_t offsets)
+        ///   LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1H forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector<ulong> mask, ushort* address, Vector<long> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets);
+
+        /// <summary>
+        /// svuint64_t svldnt1uh_gather_[u64]offset_u64(svbool_t pg, const uint16_t *base, svuint64_t offsets)
+        ///   LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1H forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(Vector<ulong> mask, ushort* address, Vector<ulong> offsets) => GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets);
+
+
+        /// GatherVectorUInt16ZeroExtendNonTemporal : Load 16-bit data and zero-extend, non-temporal
+
+        /// <summary>
+        /// svint32_t svldnt1uh_gather[_u32base]_s32(svbool_t pg, svuint32_t bases)
+        ///   LDNT1H Zresult.S, Pg/Z, [Zbases.S, XZR]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IM_3A   LDNT1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>{, #<imm>, MUL VL}]
+        ///        theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1h, EA_SCALABLE, REG_V6, REG_P7, REG_R8, 0, INS_OPTS_SCALABLE_H);
+        ///    IF_SVE_IF_4A   LDNT1H {<Zt>.S }, <Pg>/Z, [<Zn>.S{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_V2, REG_R3, INS_OPTS_SCALABLE_S);
+        ///    IF_SVE_IF_4A_A   LDNT1H {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V1, REG_P4, REG_V3, REG_R2, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_IN_4A   LDNT1H {<Zt>.H }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #1]
+        ///        theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+        /// </summary>
+        public static unsafe Vector<int> GatherVectorUInt16ZeroExtendNonTemporal(Vector<int> mask, Vector<uint> addresses) => GatherVectorUInt16ZeroExtendNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svint64_t svldnt1uh_gather_[s64]index_s64(svbool_t pg, const uint16_t *base, svint64_t indices)
+        ///   LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1H forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorUInt16ZeroExtendNonTemporal(Vector<long> mask, ushort* address, Vector<long> indices) => GatherVectorUInt16ZeroExtendNonTemporal(mask, address, indices);
+
+        /// <summary>
+        /// svint64_t svldnt1uh_gather[_u64base]_s64(svbool_t pg, svuint64_t bases)
+        ///   LDNT1H Zresult.D, Pg/Z, [Zbases.D, XZR]
+        ///
+        /// codegenarm64test: same LDNT1H forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorUInt16ZeroExtendNonTemporal(Vector<long> mask, Vector<ulong> addresses) => GatherVectorUInt16ZeroExtendNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svint64_t svldnt1uh_gather_[u64]index_s64(svbool_t pg, const uint16_t *base, svuint64_t indices)
+        ///   LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1H forms as above
+        /// </summary>
+        public static unsafe Vector<long> GatherVectorUInt16ZeroExtendNonTemporal(Vector<long> mask, ushort* address, Vector<ulong> indices) => GatherVectorUInt16ZeroExtendNonTemporal(mask, address, indices);
+
+        /// <summary>
+        /// svuint32_t svldnt1uh_gather[_u32base]_u32(svbool_t pg, svuint32_t bases)
+        ///   LDNT1H Zresult.S, Pg/Z, [Zbases.S, XZR]
+        ///
+        /// codegenarm64test: same LDNT1H forms as above
+        /// </summary>
+        public static unsafe Vector<uint> GatherVectorUInt16ZeroExtendNonTemporal(Vector<uint> mask, Vector<uint> addresses) => GatherVectorUInt16ZeroExtendNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svuint64_t svldnt1uh_gather_[s64]index_u64(svbool_t pg, const uint16_t *base, svint64_t indices)
+        ///   LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1H forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtendNonTemporal(Vector<ulong> mask, ushort* address, Vector<long> indices) => GatherVectorUInt16ZeroExtendNonTemporal(mask, address, indices);
+
+        /// <summary>
+        /// svuint64_t svldnt1uh_gather[_u64base]_u64(svbool_t pg, svuint64_t bases)
+        ///   LDNT1H Zresult.D, Pg/Z, [Zbases.D, XZR]
+        ///
+        /// codegenarm64test: same LDNT1H forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtendNonTemporal(Vector<ulong> mask, Vector<ulong> addresses) => GatherVectorUInt16ZeroExtendNonTemporal(mask, addresses);
+
+        /// <summary>
+        /// svuint64_t svldnt1uh_gather_[u64]index_u64(svbool_t pg, const uint16_t *base, svuint64_t indices)
+        ///   LDNT1H Zresult.D, Pg/Z, [Zoffsets.D, Xbase]
+        ///
+        /// codegenarm64test: same LDNT1H forms as above
+        /// </summary>
+        public static unsafe Vector<ulong> GatherVectorUInt16ZeroExtendNonTemporal(Vector<ulong> mask, ushort* address, Vector<ulong> indices) => GatherVectorUInt16ZeroExtendNonTemporal(mask, address, indices);
+
/Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V1, REG_P4, REG_V3, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1H {.H }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt16ZeroExtendNonTemporal(Vector mask, ushort* address, Vector indices) => GatherVectorUInt16ZeroExtendNonTemporal(mask, address, indices); + + + /// GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal : Load 32-bit data and zero-extend, non-temporal + + /// + /// svint64_t svldnt1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1uw_gather_[s64]offset_s64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + 
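+
+ // Illustrative managed usage (editor-added sketch, not generator output):
+ // for the byte-offset gather form above, each active lane i loads the 32-bit
+ // value at (byte*)src + byteOffsets[i] and zero-extends it into a 64-bit
+ // lane; the non-temporal hint marks the data as streamed once, so it should
+ // not displace cached working sets. The wrapper name is hypothetical.
+ private static unsafe Vector<long> GatherU32NonTemporalSketch(Vector<long> mask, uint* src, Vector<long> byteOffsets)
+     => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, src, byteOffsets);
+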
/// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svint64_t svldnt1uw_gather_[u64]offset_s64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector 
offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1uw_gather_[s64]offset_u64(svbool_t pg, const uint32_t *base, svint64_t offsets) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + /// + /// svuint64_t svldnt1uw_gather_[u64]offset_u64(svbool_t pg, const uint32_t *base, svuint64_t offsets) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(Vector mask, uint* address, Vector offsets) => GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal(mask, address, offsets); + + + /// GatherVectorUInt32ZeroExtendNonTemporal : Load 32-bit data and zero-extend, non-temporal + + /// + /// svint64_t svldnt1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svint64_t svldnt1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// LDNT1W 
Zresult.D, Pg/Z, [Zbases.D, XZR] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtendNonTemporal(mask, addresses); + + /// + /// svint64_t svldnt1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svint64_t svldnt1uw_gather_[s64]index_s64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svint64_t svldnt1uw_gather[_u64base]_s64(svbool_t pg, svuint64_t bases) + /// LDNT1W Zresult.D, Pg/Z, [Zbases.D, XZR] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// 
IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtendNonTemporal(mask, addresses); + + /// + /// svint64_t svldnt1uw_gather_[u64]index_s64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svuint64_t svldnt1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svuint64_t svldnt1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// LDNT1W Zresult.D, Pg/Z, [Zbases.D, XZR] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector 
GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtendNonTemporal(mask, addresses); + + /// + /// svuint64_t svldnt1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svuint64_t svldnt1uw_gather_[s64]index_u64(svbool_t pg, const uint32_t *base, svint64_t indices) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, uint* address, Vector indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices); + + /// + /// svuint64_t svldnt1uw_gather[_u64base]_u64(svbool_t pg, svuint64_t bases) + /// LDNT1W Zresult.D, Pg/Z, [Zbases.D, XZR] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A LDNT1W {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1W {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1W {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector GatherVectorUInt32ZeroExtendNonTemporal(Vector mask, Vector addresses) => GatherVectorUInt32ZeroExtendNonTemporal(mask, addresses); + + /// + /// svuint64_t svldnt1uw_gather_[u64]index_u64(svbool_t pg, const uint32_t *base, svuint64_t indices) + /// LDNT1W Zresult.D, Pg/Z, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1W {.S }, /Z, [{, #, MUL VL}] + /// 
theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -8, INS_OPTS_SCALABLE_S);
+ ///    IF_SVE_IF_4A LDNT1W {<Zt>.S }, <Pg>/Z, [<Zn>.S{, <Xm>}]
+ ///    theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_ZR, INS_OPTS_SCALABLE_S);
+ ///    IF_SVE_IF_4A_A LDNT1W {<Zt>.D }, <Pg>/Z, [<Zn>.D{, <Xm>}]
+ ///    theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V4, REG_P1, REG_V2, REG_ZR, INS_OPTS_SCALABLE_D);
+ ///    IF_SVE_IN_4A LDNT1W {<Zt>.S }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #2]
+ ///    theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1w, EA_SCALABLE, REG_V2, REG_P0, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe Vector<ulong> GatherVectorUInt32ZeroExtendNonTemporal(Vector<ulong> mask, uint* address, Vector<ulong> indices) => GatherVectorUInt32ZeroExtendNonTemporal(mask, address, indices);
+
+
+ /// HalvingAdd : Halving add
+
+ /// <summary>
+ /// svint8_t svhadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ ///   SHADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ ///   MOVPRFX Zresult, Zop1; SHADD Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// svint8_t svhadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ ///   SHADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ ///   SHADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+ ///   MOVPRFX Zresult, Zop1; SHADD Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// svint8_t svhadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ ///   MOVPRFX Zresult.B, Pg/Z, Zop1.B; SHADD Zresult.B, Pg/M, Zresult.B, Zop2.B
+ ///   MOVPRFX Zresult.B, Pg/Z, Zop2.B; SHADD Zresult.B, Pg/M, Zresult.B, Zop1.B
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_EP_3A SHADD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ ///    theEmitter->emitIns_R_R_R(INS_sve_shadd, EA_SCALABLE, REG_V15, REG_P0, REG_V10, INS_OPTS_SCALABLE_B);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<sbyte> HalvingAdd(Vector<sbyte> left, Vector<sbyte> right) => HalvingAdd(left, right);
+
+ /// <summary>
+ /// svint16_t svhadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ ///   SHADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ ///   MOVPRFX Zresult, Zop1; SHADD Zresult.H, Pg/M, Zresult.H, Zop2.H
+ /// svint16_t svhadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ ///   SHADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ ///   SHADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+ ///   MOVPRFX Zresult, Zop1; SHADD Zresult.H, Pg/M, Zresult.H, Zop2.H
+ /// svint16_t svhadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ ///   MOVPRFX Zresult.H, Pg/Z, Zop1.H; SHADD Zresult.H, Pg/M, Zresult.H, Zop2.H
+ ///   MOVPRFX Zresult.H, Pg/Z, Zop2.H; SHADD Zresult.H, Pg/M, Zresult.H, Zop1.H
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_EP_3A SHADD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ ///    theEmitter->emitIns_R_R_R(INS_sve_shadd, EA_SCALABLE, REG_V15, REG_P0, REG_V10, INS_OPTS_SCALABLE_B);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<short> HalvingAdd(Vector<short> left, Vector<short> right) => HalvingAdd(left, right);
+
+ /// <summary>
+ /// svint32_t svhadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ ///   SHADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ ///   MOVPRFX Zresult, Zop1; SHADD Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// svint32_t svhadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ ///   SHADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ ///   SHADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+ ///   MOVPRFX Zresult, Zop1; SHADD Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// svint32_t svhadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ ///   MOVPRFX Zresult.S, Pg/Z, Zop1.S; SHADD Zresult.S, Pg/M, Zresult.S, Zop2.S
+ ///   MOVPRFX Zresult.S, Pg/Z, Zop2.S; SHADD Zresult.S, Pg/M, Zresult.S, Zop1.S
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_EP_3A SHADD <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
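+
+ // Illustrative usage (editor-added sketch, not generator output): a halving
+ // add computes (left[i] + right[i]) >> 1 per lane using a widened
+ // intermediate, so the sum cannot overflow — a natural fit for fixed-point
+ // averaging. The wrapper name is hypothetical; HalvingAdd is the surface
+ // declared above.
+ private static Vector<sbyte> AverageFloorSketch(Vector<sbyte> left, Vector<sbyte> right)
+     => HalvingAdd(left, right); // per lane: (sbyte)((left[i] + right[i]) >> 1), truncating
+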
+ /// theEmitter->emitIns_R_R_R(INS_sve_shadd, EA_SCALABLE, REG_V15, REG_P0, REG_V10, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) => HalvingAdd(left, right); + + /// + /// svint64_t svhadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// SHADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SHADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svhadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// SHADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// SHADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; SHADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svhadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SHADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; SHADD Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_EP_3A SHADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_shadd, EA_SCALABLE, REG_V15, REG_P0, REG_V10, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) => HalvingAdd(left, right); + + /// + /// svuint8_t svhadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UHADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; UHADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svhadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UHADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// UHADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; UHADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svhadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; UHADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; UHADD Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_EP_3A UHADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uhadd, EA_SCALABLE, REG_V19, REG_P4, REG_V14, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) => HalvingAdd(left, right); + + /// + /// svuint16_t svhadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UHADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; UHADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svhadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UHADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// UHADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; UHADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svhadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; UHADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; UHADD Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_EP_3A UHADD ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uhadd, EA_SCALABLE, REG_V19, REG_P4, REG_V14, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) => HalvingAdd(left, right); + + /// + /// svuint32_t svhadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UHADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UHADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svhadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UHADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// UHADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; UHADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svhadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; UHADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; UHADD Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_EP_3A UHADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uhadd, EA_SCALABLE, REG_V19, REG_P4, REG_V14, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) => HalvingAdd(left, right); + + /// + /// svuint64_t svhadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UHADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UHADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svhadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UHADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// UHADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; UHADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svhadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; UHADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; UHADD Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_EP_3A UHADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uhadd, EA_SCALABLE, REG_V19, REG_P4, REG_V14, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingAdd(Vector left, Vector right) => HalvingAdd(left, right); + + + /// HalvingSubtract : Halving subtract + + /// + /// svint8_t svhsub[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// SHSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; SHSUB Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svhsub[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// SHSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// SHSUBR Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; SHSUB Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svhsub[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; SHSUB Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; SHSUBR Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_EP_3A SHSUB ., /M, ., . 
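+
+ // Illustrative usage (editor-added sketch, not generator output): halving
+ // subtract computes (left[i] - right[i]) >> 1 per lane with a widened
+ // intermediate, so even sbyte.MinValue - sbyte.MaxValue cannot wrap.
+ // The wrapper name is hypothetical.
+ private static Vector<sbyte> HalfDifferenceSketch(Vector<sbyte> left, Vector<sbyte> right)
+     => HalvingSubtract(left, right); // per lane: (sbyte)((left[i] - right[i]) >> 1)
+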
+ /// theEmitter->emitIns_R_R_R(INS_sve_shsub, EA_SCALABLE, REG_V16, REG_P1, REG_V11, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + /// + /// svint16_t svhsub[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// SHSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SHSUB Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svhsub[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// SHSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// SHSUBR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; SHSUB Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svhsub[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; SHSUB Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; SHSUBR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_EP_3A SHSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_shsub, EA_SCALABLE, REG_V16, REG_P1, REG_V11, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + /// + /// svint32_t svhsub[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// SHSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SHSUB Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svhsub[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// SHSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// SHSUBR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; SHSUB Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svhsub[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; SHSUB Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; SHSUBR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_EP_3A SHSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_shsub, EA_SCALABLE, REG_V16, REG_P1, REG_V11, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + /// + /// svint64_t svhsub[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// SHSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SHSUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svhsub[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// SHSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// SHSUBR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; SHSUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svhsub[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SHSUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; SHSUBR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_EP_3A SHSUB ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_shsub, EA_SCALABLE, REG_V16, REG_P1, REG_V11, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + /// + /// svuint8_t svhsub[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UHSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; UHSUB Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svhsub[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UHSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// UHSUBR Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; UHSUB Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svhsub[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; UHSUB Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; UHSUBR Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_EP_3A UHSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uhsub, EA_SCALABLE, REG_V20, REG_P5, REG_V15, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + /// + /// svuint16_t svhsub[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UHSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; UHSUB Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svhsub[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UHSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// UHSUBR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; UHSUB Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svhsub[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; UHSUB Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; UHSUBR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_EP_3A UHSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uhsub, EA_SCALABLE, REG_V20, REG_P5, REG_V15, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + /// + /// svuint32_t svhsub[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UHSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UHSUB Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svhsub[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UHSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// UHSUBR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; UHSUB Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svhsub[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; UHSUB Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; UHSUBR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_EP_3A UHSUB ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uhsub, EA_SCALABLE, REG_V20, REG_P5, REG_V15, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + /// + /// svuint64_t svhsub[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UHSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UHSUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svhsub[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UHSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// UHSUBR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; UHSUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svhsub[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; UHSUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; UHSUBR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_EP_3A UHSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uhsub, EA_SCALABLE, REG_V20, REG_P5, REG_V15, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtract(Vector left, Vector right) => HalvingSubtract(left, right); + + + /// HalvingSubtractReversed : Halving subtract reversed + + /// + /// svint8_t svhsubr[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// SHSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; SHSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svhsubr[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// SHSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// SHSUB Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; SHSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svhsubr[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; SHSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; SHSUB Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_EP_3A SHSUBR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_shsubr, EA_SCALABLE, REG_V17, REG_P2, REG_V12, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + /// + /// svint16_t svhsubr[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// SHSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SHSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svhsubr[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// SHSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// SHSUB Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; SHSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svhsubr[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; SHSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; SHSUB Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_EP_3A SHSUBR ., /M, ., . 
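+
+ // Illustrative usage (editor-added sketch, not generator output): the
+ // reversed form swaps the operand roles, computing (right[i] - left[i]) >> 1
+ // per lane, which lets the compiler keep the subtrahend in the destination
+ // register. The wrapper name is hypothetical.
+ private static Vector<sbyte> HalfDifferenceReversedSketch(Vector<sbyte> left, Vector<sbyte> right)
+     => HalvingSubtractReversed(left, right); // per lane: (sbyte)((right[i] - left[i]) >> 1)
+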
+ /// theEmitter->emitIns_R_R_R(INS_sve_shsubr, EA_SCALABLE, REG_V17, REG_P2, REG_V12, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + /// + /// svint32_t svhsubr[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// SHSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SHSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svhsubr[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// SHSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// SHSUB Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; SHSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svhsubr[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; SHSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; SHSUB Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_EP_3A SHSUBR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_shsubr, EA_SCALABLE, REG_V17, REG_P2, REG_V12, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + /// + /// svint64_t svhsubr[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// SHSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SHSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svhsubr[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// SHSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// SHSUB Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; SHSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svhsubr[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SHSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; SHSUB Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_EP_3A SHSUBR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_shsubr, EA_SCALABLE, REG_V17, REG_P2, REG_V12, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + /// + /// svuint8_t svhsubr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UHSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; UHSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svhsubr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UHSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// UHSUB Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; UHSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svhsubr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; UHSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; UHSUB Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_EP_3A UHSUBR ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uhsubr, EA_SCALABLE, REG_V21, REG_P6, REG_V16, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + /// + /// svuint16_t svhsubr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UHSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; UHSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svhsubr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UHSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// UHSUB Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; UHSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svhsubr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; UHSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; UHSUB Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_EP_3A UHSUBR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uhsubr, EA_SCALABLE, REG_V21, REG_P6, REG_V16, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + /// + /// svuint32_t svhsubr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UHSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UHSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svhsubr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UHSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// UHSUB Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; UHSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svhsubr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; UHSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; UHSUB Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_EP_3A UHSUBR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uhsubr, EA_SCALABLE, REG_V21, REG_P6, REG_V16, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + /// + /// svuint64_t svhsubr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UHSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UHSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svhsubr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UHSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// UHSUB Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; UHSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svhsubr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; UHSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; UHSUB Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_EP_3A UHSUBR ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uhsubr, EA_SCALABLE, REG_V21, REG_P6, REG_V16, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector HalvingSubtractReversed(Vector left, Vector right) => HalvingSubtractReversed(left, right); + + + /// InterleavingXorLowerUpper : Interleaving exclusive OR (bottom, top) + + /// + /// svint8_t sveorbt[_s8](svint8_t odd, svint8_t op1, svint8_t op2) + /// EORBT Ztied.B, Zop1.B, Zop2.B + /// MOVPRFX Zresult, Zodd; EORBT Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORBT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + /// + /// svint16_t sveorbt[_s16](svint16_t odd, svint16_t op1, svint16_t op2) + /// EORBT Ztied.H, Zop1.H, Zop2.H + /// MOVPRFX Zresult, Zodd; EORBT Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORBT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + /// + /// svint32_t sveorbt[_s32](svint32_t odd, svint32_t op1, svint32_t op2) + /// EORBT Ztied.S, Zop1.S, Zop2.S + /// MOVPRFX Zresult, Zodd; EORBT Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORBT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + /// + /// svint64_t sveorbt[_s64](svint64_t odd, svint64_t op1, svint64_t op2) + /// EORBT Ztied.D, Zop1.D, Zop2.D + /// MOVPRFX Zresult, Zodd; EORBT Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORBT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + /// + /// svuint8_t sveorbt[_u8](svuint8_t odd, svuint8_t op1, svuint8_t op2) + /// EORBT Ztied.B, Zop1.B, Zop2.B + /// MOVPRFX Zresult, Zodd; EORBT Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORBT ., ., . 
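+
+ // Illustrative usage (editor-added sketch, not generator output): EORBT
+ // combines XOR results computed across the even ("bottom") and odd ("top")
+ // element positions of the two sources, while the lanes it does not write
+ // pass through from the first argument (hence its name, odd). This
+ // interleaved XOR shows up in carry-less multiply and CRC-style kernels.
+ // The wrapper name is hypothetical; see the ARM spec for the exact pairing.
+ private static Vector<byte> InterleavedXorBottomTopSketch(Vector<byte> odd, Vector<byte> left, Vector<byte> right)
+     => InterleavingXorLowerUpper(odd, left, right);
+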
+ /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + /// + /// svuint16_t sveorbt[_u16](svuint16_t odd, svuint16_t op1, svuint16_t op2) + /// EORBT Ztied.H, Zop1.H, Zop2.H + /// MOVPRFX Zresult, Zodd; EORBT Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORBT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + /// + /// svuint32_t sveorbt[_u32](svuint32_t odd, svuint32_t op1, svuint32_t op2) + /// EORBT Ztied.S, Zop1.S, Zop2.S + /// MOVPRFX Zresult, Zodd; EORBT Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORBT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + /// + /// svuint64_t sveorbt[_u64](svuint64_t odd, svuint64_t op1, svuint64_t op2) + /// EORBT Ztied.D, Zop1.D, Zop2.D + /// MOVPRFX Zresult, Zodd; EORBT Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORBT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_eorbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleavingXorLowerUpper(Vector odd, Vector left, Vector right) => InterleavingXorLowerUpper(odd, left, right); + + + /// InterleavingXorUpperLower : Interleaving exclusive OR (top, bottom) + + /// + /// svint8_t sveortb[_s8](svint8_t even, svint8_t op1, svint8_t op2) + /// EORTB Ztied.B, Zop1.B, Zop2.B + /// MOVPRFX Zresult, Zeven; EORTB Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORTB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right) => InterleavingXorUpperLower(even, left, right); + + /// + /// svint16_t sveortb[_s16](svint16_t even, svint16_t op1, svint16_t op2) + /// EORTB Ztied.H, Zop1.H, Zop2.H + /// MOVPRFX Zresult, Zeven; EORTB Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORTB ., ., . 
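+
+ // Illustrative usage (editor-added sketch, not generator output): EORTB is
+ // the complement of EORBT above — the same interleaved XOR with the
+ // top/bottom roles of the sources swapped, and with the pass-through lanes
+ // supplied by `even`. The wrapper name is hypothetical.
+ private static Vector<byte> InterleavedXorTopBottomSketch(Vector<byte> even, Vector<byte> left, Vector<byte> right)
+     => InterleavingXorUpperLower(even, left, right);
+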
+ /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right) => InterleavingXorUpperLower(even, left, right); + + /// + /// svint32_t sveortb[_s32](svint32_t even, svint32_t op1, svint32_t op2) + /// EORTB Ztied.S, Zop1.S, Zop2.S + /// MOVPRFX Zresult, Zeven; EORTB Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORTB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right) => InterleavingXorUpperLower(even, left, right); + + /// + /// svint64_t sveortb[_s64](svint64_t even, svint64_t op1, svint64_t op2) + /// EORTB Ztied.D, Zop1.D, Zop2.D + /// MOVPRFX Zresult, Zeven; EORTB Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORTB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right) => InterleavingXorUpperLower(even, left, right); + + /// + /// svuint8_t sveortb[_u8](svuint8_t even, svuint8_t op1, svuint8_t op2) + /// EORTB Ztied.B, Zop1.B, Zop2.B + /// MOVPRFX Zresult, Zeven; EORTB Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORTB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right) => InterleavingXorUpperLower(even, left, right); + + /// + /// svuint16_t sveortb[_u16](svuint16_t even, svuint16_t op1, svuint16_t op2) + /// EORTB Ztied.H, Zop1.H, Zop2.H + /// MOVPRFX Zresult, Zeven; EORTB Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORTB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector InterleavingXorUpperLower(Vector even, Vector left, Vector right) => InterleavingXorUpperLower(even, left, right); + + /// + /// svuint32_t sveortb[_u32](svuint32_t even, svuint32_t op1, svuint32_t op2) + /// EORTB Ztied.S, Zop1.S, Zop2.S + /// MOVPRFX Zresult, Zeven; EORTB Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FP_3A EORTB ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector<uint> InterleavingXorUpperLower(Vector<uint> even, Vector<uint> left, Vector<uint> right) => InterleavingXorUpperLower(even, left, right);
+
+ ///
+ /// svuint64_t sveortb[_u64](svuint64_t even, svuint64_t op1, svuint64_t op2)
+ /// EORTB Ztied.D, Zop1.D, Zop2.D
+ /// MOVPRFX Zresult, Zeven; EORTB Zresult.D, Zop1.D, Zop2.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FP_3A EORTB <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_eortb, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector<ulong> InterleavingXorUpperLower(Vector<ulong> even, Vector<ulong> left, Vector<ulong> right) => InterleavingXorUpperLower(even, left, right);
+
+
+ /// Log2 : Base 2 logarithm as integer
+
+ ///
+ /// svint32_t svlogb[_f32]_m(svint32_t inactive, svbool_t pg, svfloat32_t op)
+ /// FLOGB Ztied.S, Pg/M, Zop.S
+ /// MOVPRFX Zresult, Zinactive; FLOGB Zresult.S, Pg/M, Zop.S
+ /// svint32_t svlogb[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// FLOGB Ztied.S, Pg/M, Ztied.S
+ /// MOVPRFX Zresult, Zop; FLOGB Zresult.S, Pg/M, Zop.S
+ /// svint32_t svlogb[_f32]_z(svbool_t pg, svfloat32_t op)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FLOGB Zresult.S, Pg/M, Zop.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HP_3A FLOGB <Zd>.<T>, <Pg>/M, <Zn>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_flogb, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R(INS_sve_flogb, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_flogb, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector<int> Log2(Vector<float> value) => Log2(value);
+
+ ///
+ /// svint64_t svlogb[_f64]_m(svint64_t inactive, svbool_t pg, svfloat64_t op)
+ /// FLOGB Ztied.D, Pg/M, Zop.D
+ /// MOVPRFX Zresult, Zinactive; FLOGB Zresult.D, Pg/M, Zop.D
+ /// svint64_t svlogb[_f64]_x(svbool_t pg, svfloat64_t op)
+ /// FLOGB Ztied.D, Pg/M, Ztied.D
+ /// MOVPRFX Zresult, Zop; FLOGB Zresult.D, Pg/M, Zop.D
+ /// svint64_t svlogb[_f64]_z(svbool_t pg, svfloat64_t op)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FLOGB Zresult.D, Pg/M, Zop.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HP_3A FLOGB <Zd>.<T>, <Pg>/M, <Zn>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_flogb, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R(INS_sve_flogb, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_flogb, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector<long> Log2(Vector<double> value) => Log2(value);
+
+
+ /// Match : Detect any matching elements
+
+ ///
+ /// svbool_t svmatch[_s8](svbool_t pg, svint8_t op1, svint8_t op2)
+ /// MATCH Presult.B, Pg/Z, Zop1.B, Zop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GE_4A MATCH <Pd>.<T>, <Pg>/Z, <Zn>.<T>, <Zm>.<T>
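+ /// Illustrative managed call (sketch; Sve2 class name assumed):
+ /// Vector<sbyte> hits = Sve2.Match(mask, left, right);
+ /// each active result lane flags whether that element of 'left' matches any element within its 128-bit segment of 'right'.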
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_match, EA_SCALABLE, REG_P15, REG_P0, REG_V21, REG_V0, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector<sbyte> Match(Vector<sbyte> mask, Vector<sbyte> left, Vector<sbyte> right) => Match(mask, left, right);
+
+ ///
+ /// svbool_t svmatch[_s16](svbool_t pg, svint16_t op1, svint16_t op2)
+ /// MATCH Presult.H, Pg/Z, Zop1.H, Zop2.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GE_4A MATCH <Pd>.<T>, <Pg>/Z, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_match, EA_SCALABLE, REG_P15, REG_P0, REG_V21, REG_V0, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector<short> Match(Vector<short> mask, Vector<short> left, Vector<short> right) => Match(mask, left, right);
+
+ ///
+ /// svbool_t svmatch[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// MATCH Presult.B, Pg/Z, Zop1.B, Zop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GE_4A MATCH <Pd>.<T>, <Pg>/Z, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_match, EA_SCALABLE, REG_P15, REG_P0, REG_V21, REG_V0, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector<byte> Match(Vector<byte> mask, Vector<byte> left, Vector<byte> right) => Match(mask, left, right);
+
+ ///
+ /// svbool_t svmatch[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// MATCH Presult.H, Pg/Z, Zop1.H, Zop2.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GE_4A MATCH <Pd>.<T>, <Pg>/Z, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_match, EA_SCALABLE, REG_P15, REG_P0, REG_V21, REG_V0, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector<ushort> Match(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right) => Match(mask, left, right);
+
+
+ /// MaxNumberPairwise : Maximum number pairwise
+
+ ///
+ /// svfloat32_t svmaxnmp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// FMAXNMP Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ /// MOVPRFX Zresult, Zop1; FMAXNMP Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// svfloat32_t svmaxnmp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
+ /// FMAXNMP Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ /// MOVPRFX Zresult, Zop1; FMAXNMP Zresult.S, Pg/M, Zresult.S, Zop2.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GR_3A FMAXNMP <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmaxnmp, EA_SCALABLE, REG_V17, REG_P4, REG_V18, INS_OPTS_SCALABLE_S);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector<float> MaxNumberPairwise(Vector<float> left, Vector<float> right) => MaxNumberPairwise(left, right);
+
+ ///
+ /// svfloat64_t svmaxnmp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// FMAXNMP Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ /// MOVPRFX Zresult, Zop1; FMAXNMP Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// svfloat64_t svmaxnmp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
+ /// FMAXNMP Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ /// MOVPRFX Zresult, Zop1; FMAXNMP Zresult.D, Pg/M, Zresult.D, Zop2.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GR_3A FMAXNMP <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmaxnmp, EA_SCALABLE, REG_V17, REG_P4, REG_V18, INS_OPTS_SCALABLE_S);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector<double> MaxNumberPairwise(Vector<double> left, Vector<double> right) => MaxNumberPairwise(left, right);
+
+
+ /// MaxPairwise : Maximum pairwise
+
+ ///
+ /// svint8_t svmaxp[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// SMAXP Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ /// MOVPRFX Zresult, Zop1; SMAXP Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// svint8_t svmaxp[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// SMAXP Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ /// MOVPRFX Zresult, Zop1; SMAXP Zresult.B, Pg/M, Zresult.B, Zop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_ER_3A SMAXP <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
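+ /// Illustrative managed call (sketch; Sve2 class name assumed):
+ /// Vector<sbyte> m = Sve2.MaxPairwise(left, right);
+ /// the pairwise forms reduce adjacent element pairs: even result lanes hold maxima of 'left' pairs and odd lanes maxima of 'right' pairs.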
+ /// theEmitter->emitIns_R_R_R(INS_sve_smaxp, EA_SCALABLE, REG_V24, REG_P5, REG_V19, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) => MaxPairwise(left, right); + + /// + /// svint16_t svmaxp[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// SMAXP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SMAXP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svmaxp[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// SMAXP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SMAXP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_ER_3A SMAXP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smaxp, EA_SCALABLE, REG_V24, REG_P5, REG_V19, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) => MaxPairwise(left, right); + + /// + /// svint32_t svmaxp[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// SMAXP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SMAXP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svmaxp[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// SMAXP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SMAXP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_ER_3A SMAXP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smaxp, EA_SCALABLE, REG_V24, REG_P5, REG_V19, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) => MaxPairwise(left, right); + + /// + /// svint64_t svmaxp[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// SMAXP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SMAXP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svmaxp[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// SMAXP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SMAXP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_ER_3A SMAXP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smaxp, EA_SCALABLE, REG_V24, REG_P5, REG_V19, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) => MaxPairwise(left, right); + + /// + /// svuint8_t svmaxp[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UMAXP Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; UMAXP Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svmaxp[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UMAXP Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; UMAXP Zresult.B, Pg/M, Zresult.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_ER_3A UMAXP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umaxp, EA_SCALABLE, REG_V26, REG_P3, REG_V21, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) => MaxPairwise(left, right); + + /// + /// svuint16_t svmaxp[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UMAXP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; UMAXP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svmaxp[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UMAXP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; UMAXP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_ER_3A UMAXP ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_umaxp, EA_SCALABLE, REG_V26, REG_P3, REG_V21, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) => MaxPairwise(left, right); + + /// + /// svuint32_t svmaxp[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UMAXP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UMAXP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svmaxp[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UMAXP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UMAXP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_ER_3A UMAXP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umaxp, EA_SCALABLE, REG_V26, REG_P3, REG_V21, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) => MaxPairwise(left, right); + + /// + /// svuint64_t svmaxp[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UMAXP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UMAXP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svmaxp[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UMAXP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UMAXP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_ER_3A UMAXP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umaxp, EA_SCALABLE, REG_V26, REG_P3, REG_V21, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) => MaxPairwise(left, right); + + /// + /// svfloat32_t svmaxp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMAXP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FMAXP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svmaxp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMAXP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FMAXP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GR_3A FMAXP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fmaxp, EA_SCALABLE, REG_V18, REG_P5, REG_V17, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) => MaxPairwise(left, right); + + /// + /// svfloat64_t svmaxp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FMAXP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FMAXP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svmaxp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FMAXP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FMAXP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GR_3A FMAXP ., /M, ., . 
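+ /// Note: FMAXP propagates NaN inputs, whereas the FMAXNMP-based MaxNumberPairwise above follows IEEE 754 maxNum semantics and prefers the numeric element when exactly one element of a pair is NaN.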
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmaxp, EA_SCALABLE, REG_V18, REG_P5, REG_V17, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) => MaxPairwise(left, right); + + + /// MinNumberPairwise : Minimum number pairwise + + /// + /// svfloat32_t svminnmp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMINNMP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FMINNMP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svminnmp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMINNMP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FMINNMP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GR_3A FMINNMP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fminnmp, EA_SCALABLE, REG_V19, REG_P6, REG_V16, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinNumberPairwise(Vector left, Vector right) => MinNumberPairwise(left, right); + + /// + /// svfloat64_t svminnmp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FMINNMP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FMINNMP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svminnmp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FMINNMP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FMINNMP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GR_3A FMINNMP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fminnmp, EA_SCALABLE, REG_V19, REG_P6, REG_V16, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinNumberPairwise(Vector left, Vector right) => MinNumberPairwise(left, right); + + + /// MinPairwise : Minimum pairwise + + /// + /// svint8_t svminp[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// SMINP Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; SMINP Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svminp[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// SMINP Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; SMINP Zresult.B, Pg/M, Zresult.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_ER_3A SMINP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sminp, EA_SCALABLE, REG_V25, REG_P4, REG_V20, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svint16_t svminp[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// SMINP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SMINP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svminp[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// SMINP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SMINP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_ER_3A SMINP ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sminp, EA_SCALABLE, REG_V25, REG_P4, REG_V20, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svint32_t svminp[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// SMINP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SMINP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svminp[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// SMINP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SMINP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_ER_3A SMINP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sminp, EA_SCALABLE, REG_V25, REG_P4, REG_V20, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svint64_t svminp[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// SMINP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SMINP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svminp[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// SMINP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SMINP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_ER_3A SMINP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sminp, EA_SCALABLE, REG_V25, REG_P4, REG_V20, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svuint8_t svminp[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UMINP Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; UMINP Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svminp[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UMINP Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; UMINP Zresult.B, Pg/M, Zresult.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_ER_3A UMINP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uminp, EA_SCALABLE, REG_V27, REG_P2, REG_V22, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svuint16_t svminp[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UMINP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; UMINP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svminp[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UMINP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; UMINP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_ER_3A UMINP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uminp, EA_SCALABLE, REG_V27, REG_P2, REG_V22, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svuint32_t svminp[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UMINP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UMINP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svminp[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UMINP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UMINP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_ER_3A UMINP ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uminp, EA_SCALABLE, REG_V27, REG_P2, REG_V22, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svuint64_t svminp[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UMINP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UMINP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svminp[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UMINP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UMINP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_ER_3A UMINP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uminp, EA_SCALABLE, REG_V27, REG_P2, REG_V22, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svfloat32_t svminp[_f32]_m(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMINP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FMINP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svfloat32_t svminp[_f32]_x(svbool_t pg, svfloat32_t op1, svfloat32_t op2) + /// FMINP Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; FMINP Zresult.S, Pg/M, Zresult.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GR_3A FMINP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fminp, EA_SCALABLE, REG_V20, REG_P7, REG_V15, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + /// + /// svfloat64_t svminp[_f64]_m(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FMINP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FMINP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svfloat64_t svminp[_f64]_x(svbool_t pg, svfloat64_t op1, svfloat64_t op2) + /// FMINP Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; FMINP Zresult.D, Pg/M, Zresult.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GR_3A FMINP ., /M, ., . 
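+ /// Illustrative managed call (sketch; Sve2 class name assumed):
+ /// Vector<double> m = Sve2.MinPairwise(left, right);
+ /// mirroring the max forms, FMINP propagates NaNs while the FMINNMP-based MinNumberPairwise follows minNum semantics.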
+ /// theEmitter->emitIns_R_R_R(INS_sve_fminp, EA_SCALABLE, REG_V20, REG_P7, REG_V15, INS_OPTS_SCALABLE_H);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector<double> MinPairwise(Vector<double> left, Vector<double> right) => MinPairwise(left, right);
+
+
+ /// MoveWideningLower : Move long (bottom)
+
+ ///
+ /// svint16_t svmovlb[_s16](svint8_t op)
+ /// SSHLLB Zresult.H, Zop.B, #0
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FR_2A SSHLLB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V8, REG_V9, 0, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V16, REG_V17, 8, INS_OPTS_SCALABLE_S);
+ ///
+ public static unsafe Vector<short> MoveWideningLower(Vector<sbyte> value) => MoveWideningLower(value);
+
+ ///
+ /// svint32_t svmovlb[_s32](svint16_t op)
+ /// SSHLLB Zresult.S, Zop.H, #0
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FR_2A SSHLLB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V8, REG_V9, 0, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V16, REG_V17, 8, INS_OPTS_SCALABLE_S);
+ ///
+ public static unsafe Vector<int> MoveWideningLower(Vector<short> value) => MoveWideningLower(value);
+
+ ///
+ /// svint64_t svmovlb[_s64](svint32_t op)
+ /// SSHLLB Zresult.D, Zop.S, #0
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FR_2A SSHLLB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V8, REG_V9, 0, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V16, REG_V17, 8, INS_OPTS_SCALABLE_S);
+ ///
+ public static unsafe Vector<long> MoveWideningLower(Vector<int> value) => MoveWideningLower(value);
+
+ ///
+ /// svuint16_t svmovlb[_u16](svuint8_t op)
+ /// USHLLB Zresult.H, Zop.B, #0
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FR_2A USHLLB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V4, REG_V5, 5, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V12, REG_V13, 10, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V20, REG_V21, 24, INS_OPTS_SCALABLE_S);
+ ///
+ public static unsafe Vector<ushort> MoveWideningLower(Vector<byte> value) => MoveWideningLower(value);
+
+ ///
+ /// svuint32_t svmovlb[_u32](svuint16_t op)
+ /// USHLLB Zresult.S, Zop.H, #0
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FR_2A USHLLB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V4, REG_V5, 5, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V12, REG_V13, 10, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V20, REG_V21, 24, INS_OPTS_SCALABLE_S);
+ ///
+ public static unsafe Vector<uint> MoveWideningLower(Vector<ushort> value) => MoveWideningLower(value);
+
+ ///
+ /// svuint64_t svmovlb[_u64](svuint32_t op)
+ /// USHLLB Zresult.D, Zop.S, #0
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FR_2A USHLLB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V4, REG_V5, 5, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V12, REG_V13, 10, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V20, REG_V21, 24, INS_OPTS_SCALABLE_S);
+ ///
+ public static unsafe Vector<ulong> MoveWideningLower(Vector<uint> value) => MoveWideningLower(value);
+
+
+ /// MoveWideningUpper : Move long (top)
+
+ ///
+ /// svint16_t svmovlt[_s16](svint8_t op)
+ /// SSHLLT Zresult.H, Zop.B, #0
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FR_2A SSHLLT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V2, REG_V3, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V10, REG_V11, 5, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V18, REG_V19, 16, INS_OPTS_SCALABLE_S);
+ ///
+ public static unsafe Vector<short> MoveWideningUpper(Vector<sbyte> value) => MoveWideningUpper(value);
+
+ ///
+ /// svint32_t svmovlt[_s32](svint16_t op)
+ /// SSHLLT Zresult.S, Zop.H, #0
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FR_2A SSHLLT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V2, REG_V3, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V10, REG_V11, 5, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V18, REG_V19, 16, INS_OPTS_SCALABLE_S);
+ ///
+ public static unsafe Vector<int> MoveWideningUpper(Vector<short> value) => MoveWideningUpper(value);
+
+ ///
+ /// svint64_t svmovlt[_s64](svint32_t op)
+ /// SSHLLT Zresult.D, Zop.S, #0
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FR_2A SSHLLT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V2, REG_V3, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V10, REG_V11, 5, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V18, REG_V19, 16, INS_OPTS_SCALABLE_S);
+ ///
+ public static unsafe Vector<long> MoveWideningUpper(Vector<int> value) => MoveWideningUpper(value);
+
+ ///
+ /// svuint16_t svmovlt[_u16](svuint8_t op)
+ /// USHLLT Zresult.H, Zop.B, #0
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FR_2A USHLLT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V6, REG_V7, 7, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V14, REG_V15, 15, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V22, REG_V23, 31, INS_OPTS_SCALABLE_S);
+ ///
+ public static unsafe Vector<ushort> MoveWideningUpper(Vector<byte> value) => MoveWideningUpper(value);
+
+ ///
+ /// svuint32_t svmovlt[_u32](svuint16_t op)
+ /// USHLLT Zresult.S, Zop.H, #0
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FR_2A USHLLT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V6, REG_V7, 7, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V14, REG_V15, 15, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V22, REG_V23, 31, INS_OPTS_SCALABLE_S);
+ ///
+ public static unsafe Vector<uint> MoveWideningUpper(Vector<ushort> value) => MoveWideningUpper(value);
+
+ ///
+ /// svuint64_t svmovlt[_u64](svuint32_t op)
+ /// USHLLT Zresult.D, Zop.S, #0
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FR_2A USHLLT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V6, REG_V7, 7, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V14, REG_V15, 15, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V22, REG_V23, 31, INS_OPTS_SCALABLE_S);
+ ///
+ public static unsafe Vector<ulong> MoveWideningUpper(Vector<uint> value) => MoveWideningUpper(value);
+
+
+ /// MultiplyAddBySelectedScalar : Multiply-add, addend first
+
+ ///
+ /// svint16_t svmla_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ /// MLA Ztied1.H, Zop2.H, Zop3.H[imm_index]
+ /// MOVPRFX Zresult, Zop1; MLA Zresult.H, Zop2.H, Zop3.H[imm_index]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AR_4A MLA <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_FF_3A MLA <Zda>.H, <Zn>.H, <Zm>.H[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FF_3B MLA <Zda>.S, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_FF_3C MLA <Zda>.D, <Zn>.D, <Zm>.D[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector<short> MultiplyAddBySelectedScalar(Vector<short> addend, Vector<short> left, Vector<short> right, [ConstantExpected] byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);
+
+ ///
+ /// svint32_t svmla_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ /// MLA Ztied1.S, Zop2.S, Zop3.S[imm_index]
+ /// MOVPRFX Zresult, Zop1; MLA Zresult.S, Zop2.S, Zop3.S[imm_index]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AR_4A MLA <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_FF_3A MLA <Zda>.H, <Zn>.H, <Zm>.H[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FF_3B MLA <Zda>.S, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_FF_3C MLA <Zda>.D, <Zn>.D, <Zm>.D[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector<int> MultiplyAddBySelectedScalar(Vector<int> addend, Vector<int> left, Vector<int> right, [ConstantExpected] byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);
+
+ ///
+ /// svint64_t svmla_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index)
+ /// MLA Ztied1.D, Zop2.D, Zop3.D[imm_index]
+ /// MOVPRFX Zresult, Zop1; MLA Zresult.D, Zop2.D, Zop3.D[imm_index]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AR_4A MLA <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>
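+ /// Illustrative managed call (sketch; Sve2 class name assumed; rightIndex must be a JIT-time constant lane index):
+ /// Vector<short> r = Sve2.MultiplyAddBySelectedScalar(addend, left, right, 3);
+ /// computes, per lane, addend + (left * right[3]), i.e. MLA against the lane selected from the third operand.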
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, INS_OPTS_SCALABLE_B); + /// IF_SVE_FF_3A MLA .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svuint16_t svmla_lane[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// MLA Ztied1.H, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; MLA Zresult.H, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLA ., /M, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, INS_OPTS_SCALABLE_B); + /// IF_SVE_FF_3A MLA .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svuint32_t svmla_lane[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) + /// MLA Ztied1.S, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; MLA Zresult.S, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLA ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, INS_OPTS_SCALABLE_B); + /// IF_SVE_FF_3A MLA .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + /// + /// svuint64_t svmla_lane[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3, uint64_t imm_index) + /// MLA Ztied1.D, Zop2.D, Zop3.D[imm_index] + /// MOVPRFX Zresult, Zop1; MLA Zresult.D, Zop2.D, Zop3.D[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLA ., /M, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_mla, EA_SCALABLE, REG_V0, REG_P0, REG_P0, REG_V19, INS_OPTS_SCALABLE_B); + /// IF_SVE_FF_3A MLA .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mla, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + + /// MultiplyAddRotateComplex : Complex multiply-add with rotate + + /// + /// svint8_t svcmla[_s8](svint8_t op1, svint8_t op2, svint8_t op3, uint64_t imm_rotation) + /// CMLA Ztied1.B, Zop2.B, Zop3.B, #imm_rotation + /// MOVPRFX Zresult, Zop1; CMLA Zresult.B, Zop2.B, Zop3.B, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A CMLA ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FB_3A CMLA .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// 
theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FB_3B CMLA .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svint16_t svcmla[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_rotation) + /// CMLA Ztied1.H, Zop2.H, Zop3.H, #imm_rotation + /// MOVPRFX Zresult, Zop1; CMLA Zresult.H, Zop2.H, Zop3.H, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A CMLA ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FB_3A CMLA .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FB_3B CMLA .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svint32_t svcmla[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_rotation) + /// CMLA Ztied1.S, Zop2.S, Zop3.S, #imm_rotation + /// MOVPRFX Zresult, Zop1; CMLA Zresult.S, Zop2.S, Zop3.S, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A CMLA ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, 
EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FB_3A CMLA .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FB_3B CMLA .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svint64_t svcmla[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_rotation) + /// CMLA Ztied1.D, Zop2.D, Zop3.D, #imm_rotation + /// MOVPRFX Zresult, Zop1; CMLA Zresult.D, Zop2.D, Zop3.D, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A CMLA ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FB_3A CMLA .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FB_3B CMLA .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svuint8_t svcmla[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3, uint64_t imm_rotation) + /// CMLA Ztied1.B, Zop2.B, Zop3.B, #imm_rotation + /// MOVPRFX Zresult, Zop1; CMLA Zresult.B, Zop2.B, Zop3.B, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A CMLA ., ., ., + /// 
theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FB_3A CMLA .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FB_3B CMLA .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svuint16_t svcmla[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_rotation) + /// CMLA Ztied1.H, Zop2.H, Zop3.H, #imm_rotation + /// MOVPRFX Zresult, Zop1; CMLA Zresult.H, Zop2.H, Zop3.H, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A CMLA ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FB_3A CMLA .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FB_3B CMLA .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => 
MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svuint32_t svcmla[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_rotation) + /// CMLA Ztied1.S, Zop2.S, Zop3.S, #imm_rotation + /// MOVPRFX Zresult, Zop1; CMLA Zresult.S, Zop2.S, Zop3.S, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A CMLA ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FB_3A CMLA .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FB_3B CMLA .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + /// + /// svuint64_t svcmla[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3, uint64_t imm_rotation) + /// CMLA Ztied1.D, Zop2.D, Zop3.D, #imm_rotation + /// MOVPRFX Zresult, Zop1; CMLA Zresult.D, Zop2.D, Zop3.D, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A CMLA ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FB_3A CMLA .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FB_3B CMLA .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// 
theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => MultiplyAddRotateComplex(addend, left, right, rotation); + + + /// MultiplyAddRotateComplexBySelectedScalar : Complex multiply-add with rotate + + /// + /// svint16_t svcmla_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// CMLA Ztied1.H, Zop2.H, Zop3.H[imm_index], #imm_rotation + /// MOVPRFX Zresult, Zop1; CMLA Zresult.H, Zop2.H, Zop3.H[imm_index], #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A CMLA ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FB_3A CMLA .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FB_3B CMLA .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) => MultiplyAddRotateComplexBySelectedScalar(addend, left, right, rightIndex, rotation); + + /// + /// svint32_t svcmla_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// CMLA Ztied1.S, Zop2.S, Zop3.S[imm_index], #imm_rotation + /// MOVPRFX Zresult, Zop1; CMLA Zresult.S, Zop2.S, Zop3.S[imm_index], #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A CMLA ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FB_3A CMLA .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// 
theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FB_3B CMLA .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) => MultiplyAddRotateComplexBySelectedScalar(addend, left, right, rightIndex, rotation); + + /// + /// svuint16_t svcmla_lane[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// CMLA Ztied1.H, Zop2.H, Zop3.H[imm_index], #imm_rotation + /// MOVPRFX Zresult, Zop1; CMLA Zresult.H, Zop2.H, Zop3.H[imm_index], #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A CMLA ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FB_3A CMLA .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FB_3B CMLA .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) => MultiplyAddRotateComplexBySelectedScalar(addend, left, right, rightIndex, rotation); + + /// + /// svuint32_t svcmla_lane[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// CMLA Ztied1.S, Zop2.S, Zop3.S[imm_index], #imm_rotation + /// MOVPRFX Zresult, Zop1; CMLA Zresult.S, Zop2.S, Zop3.S[imm_index], #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A CMLA ., ., ., + /// 
theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V8, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_cmla, EA_SCALABLE, REG_V9, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FB_3A CMLA .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FB_3B CMLA .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_cmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) => MultiplyAddRotateComplexBySelectedScalar(addend, left, right, rightIndex, rotation); + + + /// MultiplyAddWideningLower : Multiply-add long (bottom) + + /// + /// svint16_t svmlalb[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// SMLALB Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; SMLALB Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A SMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningLower(op1, op2, op3); + + /// + /// svint32_t svmlalb[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// SMLALB Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; SMLALB Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLALB ., ., . 
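+
+ // Illustrative sketch (not generated output; the helper name is hypothetical). It shows how the
+ // MultiplyAddRotateComplexBySelectedScalar overloads above might be called: CMLA treats consecutive
+ // element pairs as complex numbers (real, imaginary), rotates the product of the two sources, and
+ // accumulates it; rightIndex selects the complex pair from the indexed operand. The rotation
+ // encoding used here (1, assumed to mean 90 degrees) is an assumption, not confirmed by this file.
+ public static unsafe Vector<ushort> ComplexMlaSketch(Vector<ushort> acc, Vector<ushort> a, Vector<ushort> b)
+     => MultiplyAddRotateComplexBySelectedScalar(acc, a, b, 0, 1);
+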
+ /// theEmitter->emitIns_R_R_R(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A SMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningLower(op1, op2, op3); + + /// + /// svint32_t svmlalb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// SMLALB Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; SMLALB Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A SMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplyAddWideningLower(op1, op2, op3, imm_index); + + /// + /// svint64_t svmlalb[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// SMLALB Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; SMLALB Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A SMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningLower(op1, op2, op3); + + /// + /// svint64_t svmlalb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// SMLALB Ztied1.D, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; SMLALB Zresult.D, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A SMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplyAddWideningLower(op1, op2, op3, imm_index); + + /// + /// svuint16_t svmlalb[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) + /// UMLALB Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; UMLALB Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLALB ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_umlalb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A UMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V4, 4, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V8, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningLower(op1, op2, op3); + + /// + /// svuint32_t svmlalb[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) + /// UMLALB Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; UMLALB Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlalb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A UMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V4, 4, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V8, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningLower(op1, op2, op3); + + /// + /// svuint32_t svmlalb_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// UMLALB Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; UMLALB Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlalb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A UMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V4, 4, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V8, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplyAddWideningLower(op1, op2, op3, imm_index); + + /// + /// svuint64_t svmlalb[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) + /// UMLALB Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; UMLALB Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlalb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A UMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V4, 4, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V8, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningLower(op1, op2, op3); + + /// + /// svuint64_t svmlalb_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) + /// UMLALB Ztied1.D, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; UMLALB Zresult.D, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLALB ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_umlalb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A UMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V4, 4, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V8, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplyAddWideningLower(op1, op2, op3, imm_index); + + + /// MultiplyAddWideningUpper : Multiply-add long (top) + + /// + /// svint16_t svmlalt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// SMLALT Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; SMLALT Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A SMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningUpper(op1, op2, op3); + + /// + /// svint32_t svmlalt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// SMLALT Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; SMLALT Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A SMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningUpper(op1, op2, op3); + + /// + /// svint32_t svmlalt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// SMLALT Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; SMLALT Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A SMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplyAddWideningUpper(op1, op2, op3, imm_index); + + /// + /// svint64_t svmlalt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// SMLALT Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; SMLALT Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLALT ., ., . 
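+
+ // Illustrative sketch (not generated output; the helper name is hypothetical). The
+ // MultiplyAddWideningLower overloads above widen the even-numbered (bottom) elements of the two
+ // narrow operands, multiply them, and add the products to the wide accumulator (SMLALB/UMLALB).
+ public static unsafe Vector<int> WideningMlaBottomSketch(Vector<int> acc, Vector<short> a, Vector<short> b)
+     => MultiplyAddWideningLower(acc, a, b);
+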
+ /// theEmitter->emitIns_R_R_R(INS_sve_smlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A SMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningUpper(op1, op2, op3); + + /// + /// svint64_t svmlalt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// SMLALT Ztied1.D, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; SMLALT Zresult.D, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A SMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplyAddWideningUpper(op1, op2, op3, imm_index); + + /// + /// svuint16_t svmlalt[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) + /// UMLALT Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; UMLALT Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlalt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D); + /// IF_SVE_FG_3A UMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V10, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningUpper(op1, op2, op3); + + /// + /// svuint32_t svmlalt[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) + /// UMLALT Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; UMLALT Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlalt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D); + /// IF_SVE_FG_3A UMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V10, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningUpper(op1, op2, op3); + + /// + /// svuint32_t svmlalt_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// UMLALT Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; UMLALT Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLALT ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_umlalt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D); + /// IF_SVE_FG_3A UMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V10, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplyAddWideningUpper(op1, op2, op3, imm_index); + + /// + /// svuint64_t svmlalt[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) + /// UMLALT Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; UMLALT Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlalt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D); + /// IF_SVE_FG_3A UMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V10, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningUpper(op1, op2, op3); + + /// + /// svuint64_t svmlalt_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) + /// UMLALT Ztied1.D, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; UMLALT Zresult.D, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlalt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D); + /// IF_SVE_FG_3A UMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V10, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplyAddWideningUpper(op1, op2, op3, imm_index); + + + /// MultiplyBySelectedScalar : Multiply + + /// + /// svint16_t svmul_lane[_s16](svint16_t op1, svint16_t op2, uint64_t imm_index) + /// MUL Zresult.H, Zop1.H, Zop2.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_AE_3A MUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_P1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_BD_3A MUL ., ., . 
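+
+ // Illustrative sketch (not generated output; the helper name is hypothetical).
+ // MultiplyAddWideningUpper is the odd-numbered (top) element counterpart of
+ // MultiplyAddWideningLower (SMLALT/UMLALT), so consuming a full narrow vector
+ // takes one Lower step plus one Upper step.
+ public static unsafe Vector<long> WideningMlaTopSketch(Vector<long> acc, Vector<int> a, Vector<int> b)
+     => MultiplyAddWideningUpper(acc, a, b);
+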
+ /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_V0, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_FD_3A MUL .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FD_3B MUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FD_3C MUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_EE_1A MUL ., ., # + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex); + + /// + /// svint32_t svmul_lane[_s32](svint32_t op1, svint32_t op2, uint64_t imm_index) + /// MUL Zresult.S, Zop1.S, Zop2.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_AE_3A MUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_P1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_BD_3A MUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_V0, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_FD_3A MUL .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FD_3B MUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FD_3C MUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_EE_1A MUL ., ., # + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex); + + /// + /// svint64_t svmul_lane[_s64](svint64_t op1, svint64_t op2, uint64_t imm_index) + /// MUL Zresult.D, Zop1.D, Zop2.D[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_AE_3A MUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_P1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_BD_3A MUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_V0, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_FD_3A MUL .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FD_3B MUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FD_3C MUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_EE_1A MUL ., ., # + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex); + + /// + /// svuint16_t svmul_lane[_u16](svuint16_t op1, svuint16_t op2, uint64_t imm_index) + /// MUL Zresult.H, Zop1.H, Zop2.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_AE_3A MUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_P1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_BD_3A MUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_V0, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_FD_3A MUL .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FD_3B MUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FD_3C MUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_EE_1A MUL ., ., # + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex); + + /// + /// svuint32_t svmul_lane[_u32](svuint32_t op1, svuint32_t op2, uint64_t imm_index) + /// MUL Zresult.S, Zop1.S, Zop2.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_AE_3A MUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_P1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_BD_3A MUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_V0, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_FD_3A MUL .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FD_3B MUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FD_3C MUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_EE_1A MUL ., ., # + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex); + + /// + /// svuint64_t svmul_lane[_u64](svuint64_t op1, svuint64_t op2, uint64_t imm_index) + /// MUL Zresult.D, Zop1.D, Zop2.D[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_AE_3A MUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_P1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_BD_3A MUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_mul, EA_SCALABLE, REG_V5, REG_V0, REG_V31, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_FD_3A MUL .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FD_3B MUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V8, REG_V9, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V10, REG_V11, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FD_3C MUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mul, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_EE_1A MUL ., ., # + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_mul, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex); + + + /// MultiplySubtractBySelectedScalar : Multiply-subtract, minuend first + + /// + /// svint16_t svmls_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// MLS Ztied1.H, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; MLS Zresult.H, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLS ., /M, ., . 
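+
+ // Illustrative sketch (not generated output; the helper name is hypothetical).
+ // MultiplyBySelectedScalar broadcasts one lane of the second operand (MUL Zd.S, Zn.S, Zm.S[1]
+ // here) and multiplies every element of the first operand by it; rightIndex must be a JIT-time
+ // constant within the lane range for the element size.
+ public static unsafe Vector<uint> MulByLaneSketch(Vector<uint> v, Vector<uint> scalars)
+     => MultiplyBySelectedScalar(v, scalars, 1);
+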
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FF_3A MLS <Zda>.H, <Zn>.H, <Zm>.H[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FF_3B MLS <Zda>.S, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_FF_3C MLS <Zda>.D, <Zn>.D, <Zm>.D[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<short> MultiplySubtractBySelectedScalar(Vector<short> minuend, Vector<short> left, Vector<short> right, [ConstantExpected] byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
+
+ /// <summary>
+ /// svint32_t svmls_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ /// MLS Ztied1.S, Zop2.S, Zop3.S[imm_index]
+ /// MOVPRFX Zresult, Zop1; MLS Zresult.S, Zop2.S, Zop3.S[imm_index]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AR_4A MLS <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FF_3A MLS <Zda>.H, <Zn>.H, <Zm>.H[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FF_3B MLS <Zda>.S, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_FF_3C MLS <Zda>.D, <Zn>.D, <Zm>.D[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> MultiplySubtractBySelectedScalar(Vector<int> minuend, Vector<int> left, Vector<int> right, [ConstantExpected] byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
+
+ /// <summary>
+ /// svint64_t svmls_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index)
+ /// MLS Ztied1.D, Zop2.D, Zop3.D[imm_index]
+ /// MOVPRFX Zresult, Zop1; MLS Zresult.D, Zop2.D, Zop3.D[imm_index]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AR_4A MLS <Zda>.<T>, <Pg>/M, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3A MLS .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); + + /// + /// svuint16_t svmls_lane[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// MLS Ztied1.H, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; MLS Zresult.H, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLS ., /M, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3A MLS .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); + + /// + /// svuint32_t svmls_lane[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) + /// MLS Ztied1.S, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; MLS Zresult.S, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLS ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3A MLS .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); + + /// + /// svuint64_t svmls_lane[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3, uint64_t imm_index) + /// MLS Ztied1.D, Zop2.D, Zop3.D[imm_index] + /// MOVPRFX Zresult, Zop1; MLS Zresult.D, Zop2.D, Zop3.D[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_AR_4A MLS ., /M, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_mls, EA_SCALABLE, REG_V2, REG_P1, REG_V31, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3A MLS .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FF_3B MLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V12, REG_V13, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FF_3C MLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_mls, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); + + + /// MultiplySubtractWideningLower : Multiply-subtract long (bottom) + + /// + /// svint16_t svmlslb[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// SMLSLB Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; SMLSLB Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLSLB ., ., . 
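+
+ // Illustrative sketch (not generated output; the helper name is hypothetical). The indexed MLS
+ // form computes minuend - left * scalars[rightIndex] per element, with the selected lane
+ // broadcast from the third operand.
+ public static unsafe Vector<int> MlsByLaneSketch(Vector<int> acc, Vector<int> v, Vector<int> scalars)
+     => MultiplySubtractBySelectedScalar(acc, v, scalars, 2);
+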
+ /// theEmitter->emitIns_R_R_R(INS_sve_smlslb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_FG_3A SMLSLB <Zda>.S, <Zn>.H, <Zm>.H[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V2, 2, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FG_3B SMLSLB <Zda>.D, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V4, 2, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<short> MultiplySubtractWideningLower(Vector<short> op1, Vector<sbyte> op2, Vector<sbyte> op3) => MultiplySubtractWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svmlslb[_s32](svint32_t op1, svint16_t op2, svint16_t op3)
+ /// SMLSLB Ztied1.S, Zop2.H, Zop3.H
+ /// MOVPRFX Zresult, Zop1; SMLSLB Zresult.S, Zop2.H, Zop3.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_EL_3A SMLSLB <Zda>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ /// theEmitter->emitIns_R_R_R(INS_sve_smlslb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_FG_3A SMLSLB <Zda>.S, <Zn>.H, <Zm>.H[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V2, 2, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FG_3B SMLSLB <Zda>.D, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V4, 2, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<int> MultiplySubtractWideningLower(Vector<int> op1, Vector<short> op2, Vector<short> op3) => MultiplySubtractWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svmlslb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ /// SMLSLB Ztied1.S, Zop2.H, Zop3.H[imm_index]
+ /// MOVPRFX Zresult, Zop1; SMLSLB Zresult.S, Zop2.H, Zop3.H[imm_index]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_EL_3A SMLSLB <Zda>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ /// theEmitter->emitIns_R_R_R(INS_sve_smlslb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_FG_3A SMLSLB <Zda>.S, <Zn>.H, <Zm>.H[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V2, 2, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FG_3B SMLSLB <Zda>.D, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V4, 2, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<int> MultiplySubtractWideningLower(Vector<int> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) => MultiplySubtractWideningLower(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svint64_t svmlslb[_s64](svint64_t op1, svint32_t op2, svint32_t op3)
+ /// SMLSLB Ztied1.D, Zop2.S, Zop3.S
+ /// MOVPRFX Zresult, Zop1; SMLSLB Zresult.D, Zop2.S, Zop3.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_EL_3A SMLSLB <Zda>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ /// theEmitter->emitIns_R_R_R(INS_sve_smlslb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_FG_3A SMLSLB <Zda>.S, <Zn>.H, <Zm>.H[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V2, 2, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FG_3B SMLSLB <Zda>.D, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V4, 2, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<long> MultiplySubtractWideningLower(Vector<long> op1, Vector<int> op2, Vector<int> op3) => MultiplySubtractWideningLower(op1, op2, op3);
+
+ /// <summary>
+ /// svint64_t svmlslb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ /// SMLSLB Ztied1.D, Zop2.S, Zop3.S[imm_index]
+ /// MOVPRFX Zresult, Zop1; SMLSLB Zresult.D, Zop2.S, Zop3.S[imm_index]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_EL_3A SMLSLB <Zda>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ /// theEmitter->emitIns_R_R_R(INS_sve_smlslb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// IF_SVE_FG_3A SMLSLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V2, 2, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLSLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplySubtractWideningLower(op1, op2, op3, imm_index); + + /// + /// svuint16_t svmlslb[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) + /// UMLSLB Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; UMLSLB Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLSLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlslb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A UMLSLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V6, 6, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLSLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V12, 2, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) => MultiplySubtractWideningLower(op1, op2, op3); + + /// + /// svuint32_t svmlslb[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) + /// UMLSLB Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; UMLSLB Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLSLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlslb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A UMLSLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V6, 6, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLSLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V12, 2, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) => MultiplySubtractWideningLower(op1, op2, op3); + + /// + /// svuint32_t svmlslb_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// UMLSLB Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; UMLSLB Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLSLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlslb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A UMLSLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V6, 6, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLSLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V12, 2, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplySubtractWideningLower(op1, op2, op3, imm_index); + + /// + /// svuint64_t svmlslb[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) + /// UMLSLB Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; UMLSLB Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLSLB ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_umlslb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A UMLSLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V6, 6, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLSLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V12, 2, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) => MultiplySubtractWideningLower(op1, op2, op3); + + /// + /// svuint64_t svmlslb_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) + /// UMLSLB Ztied1.D, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; UMLSLB Zresult.D, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLSLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlslb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A UMLSLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V6, 6, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLSLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V12, 2, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplySubtractWideningLower(op1, op2, op3, imm_index); + + + /// MultiplySubtractWideningUpper : Multiply-subtract long (top) + + /// + /// svint16_t svmlslt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// SMLSLT Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; SMLSLT Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLSLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smlslt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A SMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslt, EA_SCALABLE, REG_V6, REG_V7, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslt, EA_SCALABLE, REG_V6, REG_V7, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplySubtractWideningUpper(op1, op2, op3); + + /// + /// svint32_t svmlslt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// SMLSLT Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; SMLSLT Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLSLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smlslt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A SMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslt, EA_SCALABLE, REG_V6, REG_V7, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslt, EA_SCALABLE, REG_V6, REG_V7, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplySubtractWideningUpper(op1, op2, op3); + + /// + /// svint32_t svmlslt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// SMLSLT Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; SMLSLT Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLSLT ., ., . 
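+
+ // Illustrative sketch (not generated output; the helper name is hypothetical). The widening
+ // multiply-subtract forms above mirror the multiply-add ones, subtracting the even-element
+ // (bottom) products from the wide accumulator (SMLSLB/UMLSLB).
+ public static unsafe Vector<ulong> WideningMlsBottomSketch(Vector<ulong> acc, Vector<uint> a, Vector<uint> b)
+     => MultiplySubtractWideningLower(acc, a, b);
+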
+ /// theEmitter->emitIns_R_R_R(INS_sve_smlslt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A SMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslt, EA_SCALABLE, REG_V6, REG_V7, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslt, EA_SCALABLE, REG_V6, REG_V7, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplySubtractWideningUpper(op1, op2, op3, imm_index); + + /// + /// svint64_t svmlslt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// SMLSLT Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; SMLSLT Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLSLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smlslt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A SMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslt, EA_SCALABLE, REG_V6, REG_V7, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslt, EA_SCALABLE, REG_V6, REG_V7, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplySubtractWideningUpper(op1, op2, op3); + + /// + /// svint64_t svmlslt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// SMLSLT Ztied1.D, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; SMLSLT Zresult.D, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EL_3A SMLSLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smlslt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3A SMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslt, EA_SCALABLE, REG_V6, REG_V7, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B SMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smlslt, EA_SCALABLE, REG_V6, REG_V7, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplySubtractWideningUpper(op1, op2, op3, imm_index); + + /// + /// svuint16_t svmlslt[_u16](svuint16_t op1, svuint8_t op2, svuint8_t op3) + /// UMLSLT Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; UMLSLT Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLSLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlslt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A UMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V14, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplySubtractWideningUpper(op1, op2, op3); + + /// + /// svuint32_t svmlslt[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3) + /// UMLSLT Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; UMLSLT Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLSLT ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_umlslt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A UMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V14, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplySubtractWideningUpper(op1, op2, op3); + + /// + /// svuint32_t svmlslt_lane[_u32](svuint32_t op1, svuint16_t op2, svuint16_t op3, uint64_t imm_index) + /// UMLSLT Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; UMLSLT Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLSLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlslt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A UMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V14, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplySubtractWideningUpper(op1, op2, op3, imm_index); + + /// + /// svuint64_t svmlslt[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3) + /// UMLSLT Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; UMLSLT Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLSLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlslt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A UMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V14, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplySubtractWideningUpper(op1, op2, op3); + + /// + /// svuint64_t svmlslt_lane[_u64](svuint64_t op1, svuint32_t op2, svuint32_t op3, uint64_t imm_index) + /// UMLSLT Ztied1.D, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; UMLSLT Zresult.D, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EL_3A UMLSLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umlslt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_S); + /// IF_SVE_FG_3A UMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FG_3B UMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V14, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplySubtractWideningUpper(op1, op2, op3, imm_index); + + + /// MultiplyWideningLower : Multiply long (bottom) + + /// + /// svint16_t svmullb[_s16](svint8_t op1, svint8_t op2) + /// SMULLB Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SMULLB ., ., . 
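+
+ // Illustrative sketch (not generated output; the helper name is hypothetical). The indexed
+ // top-half form subtracts products of the odd-numbered elements, with the lane index supplied as
+ // a compile-time constant (SMLSLT/UMLSLT with Zm.H[imm]).
+ public static unsafe Vector<int> WideningMlsTopByLaneSketch(Vector<int> acc, Vector<short> a, Vector<short> scalars)
+     => MultiplySubtractWideningUpper(acc, a, scalars, 1);
+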
+ /// theEmitter->emitIns_R_R_R(INS_sve_smullb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3A SMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V2, REG_V3, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B SMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V2, REG_V3, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right) => MultiplyWideningLower(left, right); + + /// + /// svint32_t svmullb[_s32](svint16_t op1, svint16_t op2) + /// SMULLB Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SMULLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smullb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3A SMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V2, REG_V3, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B SMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V2, REG_V3, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right) => MultiplyWideningLower(left, right); + + /// + /// svint32_t svmullb_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index) + /// SMULLB Zresult.S, Zop1.H, Zop2.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SMULLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smullb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3A SMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V2, REG_V3, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B SMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V2, REG_V3, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningLower(Vector op1, Vector op2, ulong imm_index) => MultiplyWideningLower(op1, op2, imm_index); + + /// + /// svint64_t svmullb[_s64](svint32_t op1, svint32_t op2) + /// SMULLB Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SMULLB ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_smullb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3A SMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V2, REG_V3, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B SMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V2, REG_V3, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right) => MultiplyWideningLower(left, right); + + /// + /// svint64_t svmullb_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index) + /// SMULLB Zresult.D, Zop1.S, Zop2.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SMULLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smullb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3A SMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V2, REG_V3, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B SMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullb, EA_SCALABLE, REG_V2, REG_V3, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningLower(Vector op1, Vector op2, ulong imm_index) => MultiplyWideningLower(op1, op2, imm_index); + + /// + /// svuint16_t svmullb[_u16](svuint8_t op1, svuint8_t op2) + /// UMULLB Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FN_3A UMULLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umullb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3A UMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V8, REG_V9, REG_V4, 4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B UMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V8, REG_V9, REG_V8, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V10, REG_V11, REG_V10, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right) => MultiplyWideningLower(left, right); + + /// + /// svuint32_t svmullb[_u32](svuint16_t op1, svuint16_t op2) + /// UMULLB Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FN_3A UMULLB ., ., . 
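+
+ // Usage sketch (editor's addition, illustrative only; Sve2 and the Vector<long>/Vector<int>
+ // generics are assumptions reconstructed from the svmullb_lane[_s64] prototype above).
+ // imm_index selects one 32-bit lane of b within each 128-bit segment (the emitter tests
+ // above exercise indices 0 and 1) and multiplies it by the even (bottom) lanes of a:
+ static Vector<long> EvenProductsByLane(Vector<int> a, Vector<int> b)
+     => Sve2.MultiplyWideningLower(a, b, 1); // SMULLB Zd.D, Za.S, Zb.S[1]
+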
+ /// theEmitter->emitIns_R_R_R(INS_sve_umullb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3A UMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V8, REG_V9, REG_V4, 4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B UMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V8, REG_V9, REG_V8, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V10, REG_V11, REG_V10, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right) => MultiplyWideningLower(left, right); + + /// + /// svuint32_t svmullb_lane[_u32](svuint16_t op1, svuint16_t op2, uint64_t imm_index) + /// UMULLB Zresult.S, Zop1.H, Zop2.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_FN_3A UMULLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umullb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3A UMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V8, REG_V9, REG_V4, 4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B UMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V8, REG_V9, REG_V8, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V10, REG_V11, REG_V10, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningLower(Vector op1, Vector op2, ulong imm_index) => MultiplyWideningLower(op1, op2, imm_index); + + /// + /// svuint64_t svmullb[_u64](svuint32_t op1, svuint32_t op2) + /// UMULLB Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FN_3A UMULLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umullb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3A UMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V8, REG_V9, REG_V4, 4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B UMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V8, REG_V9, REG_V8, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V10, REG_V11, REG_V10, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningLower(Vector left, Vector right) => MultiplyWideningLower(left, right); + + /// + /// svuint64_t svmullb_lane[_u64](svuint32_t op1, svuint32_t op2, uint64_t imm_index) + /// UMULLB Zresult.D, Zop1.S, Zop2.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_FN_3A UMULLB ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_umullb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3A UMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V8, REG_V9, REG_V4, 4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B UMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V8, REG_V9, REG_V8, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullb, EA_SCALABLE, REG_V10, REG_V11, REG_V10, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningLower(Vector op1, Vector op2, ulong imm_index) => MultiplyWideningLower(op1, op2, imm_index); + + + /// MultiplyWideningUpper : Multiply long (top) + + /// + /// svint16_t svmullt[_s16](svint8_t op1, svint8_t op2) + /// SMULLT Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SMULLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smullt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_FE_3A SMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V4, REG_V5, REG_V2, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V6, REG_V7, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B SMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V4, REG_V5, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V6, REG_V7, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right) => MultiplyWideningUpper(left, right); + + /// + /// svint32_t svmullt[_s32](svint16_t op1, svint16_t op2) + /// SMULLT Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SMULLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smullt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_FE_3A SMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V4, REG_V5, REG_V2, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V6, REG_V7, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B SMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V4, REG_V5, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V6, REG_V7, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right) => MultiplyWideningUpper(left, right); + + /// + /// svint32_t svmullt_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index) + /// SMULLT Zresult.S, Zop1.H, Zop2.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SMULLT ., ., . 
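+
+ // Usage sketch (editor's addition, illustrative only; Sve2 assumed, generics reconstructed
+ // from the svmullb[_u16] prototype earlier in this section). The even (bottom) 8-bit lanes
+ // are multiplied and widened to 16 bits, so no product can overflow:
+ static Vector<ushort> EvenWideningProducts(Vector<byte> a, Vector<byte> b)
+     => Sve2.MultiplyWideningLower(a, b); // UMULLB Zd.H, Za.B, Zb.B
+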
+ /// theEmitter->emitIns_R_R_R(INS_sve_smullt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_FE_3A SMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V4, REG_V5, REG_V2, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V6, REG_V7, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B SMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V4, REG_V5, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V6, REG_V7, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index) => MultiplyWideningUpper(op1, op2, imm_index); + + /// + /// svint64_t svmullt[_s64](svint32_t op1, svint32_t op2) + /// SMULLT Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SMULLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smullt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_FE_3A SMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V4, REG_V5, REG_V2, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V6, REG_V7, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B SMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V4, REG_V5, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V6, REG_V7, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right) => MultiplyWideningUpper(left, right); + + /// + /// svint64_t svmullt_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index) + /// SMULLT Zresult.D, Zop1.S, Zop2.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SMULLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_smullt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_FE_3A SMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V4, REG_V5, REG_V2, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V6, REG_V7, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B SMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V4, REG_V5, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_smullt, EA_SCALABLE, REG_V6, REG_V7, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index) => MultiplyWideningUpper(op1, op2, imm_index); + + /// + /// svuint16_t svmullt[_u16](svuint8_t op1, svuint8_t op2) + /// UMULLT Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FN_3A UMULLT ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_umullt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D); + /// IF_SVE_FE_3A UMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V12, REG_V13, REG_V6, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B UMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V12, REG_V13, REG_V12, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V14, REG_V15, REG_V14, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right) => MultiplyWideningUpper(left, right); + + /// + /// svuint32_t svmullt[_u32](svuint16_t op1, svuint16_t op2) + /// UMULLT Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FN_3A UMULLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umullt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D); + /// IF_SVE_FE_3A UMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V12, REG_V13, REG_V6, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B UMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V12, REG_V13, REG_V12, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V14, REG_V15, REG_V14, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right) => MultiplyWideningUpper(left, right); + + /// + /// svuint32_t svmullt_lane[_u32](svuint16_t op1, svuint16_t op2, uint64_t imm_index) + /// UMULLT Zresult.S, Zop1.H, Zop2.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_FN_3A UMULLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umullt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D); + /// IF_SVE_FE_3A UMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V12, REG_V13, REG_V6, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B UMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V12, REG_V13, REG_V12, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V14, REG_V15, REG_V14, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index) => MultiplyWideningUpper(op1, op2, imm_index); + + /// + /// svuint64_t svmullt[_u64](svuint32_t op1, svuint32_t op2) + /// UMULLT Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FN_3A UMULLT ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_umullt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D); + /// IF_SVE_FE_3A UMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V12, REG_V13, REG_V6, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B UMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V12, REG_V13, REG_V12, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V14, REG_V15, REG_V14, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningUpper(Vector left, Vector right) => MultiplyWideningUpper(left, right); + + /// + /// svuint64_t svmullt_lane[_u64](svuint32_t op1, svuint32_t op2, uint64_t imm_index) + /// UMULLT Zresult.D, Zop1.S, Zop2.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_FN_3A UMULLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_umullt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D); + /// IF_SVE_FE_3A UMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V12, REG_V13, REG_V6, 6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FE_3B UMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V12, REG_V13, REG_V12, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_umullt, EA_SCALABLE, REG_V14, REG_V15, REG_V14, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index) => MultiplyWideningUpper(op1, op2, imm_index); + + + /// NoMatch : Detect no matching elements + + /// + /// svbool_t svnmatch[_s8](svbool_t pg, svint8_t op1, svint8_t op2) + /// NMATCH Presult.B, Pg/Z, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_GE_4A NMATCH ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nmatch, EA_SCALABLE, REG_P0, REG_P7, REG_V11, REG_V31, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector NoMatch(Vector mask, Vector left, Vector right) => NoMatch(mask, left, right); + + /// + /// svbool_t svnmatch[_s16](svbool_t pg, svint16_t op1, svint16_t op2) + /// NMATCH Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GE_4A NMATCH ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nmatch, EA_SCALABLE, REG_P0, REG_P7, REG_V11, REG_V31, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector NoMatch(Vector mask, Vector left, Vector right) => NoMatch(mask, left, right); + + /// + /// svbool_t svnmatch[_u8](svbool_t pg, svuint8_t op1, svuint8_t op2) + /// NMATCH Presult.B, Pg/Z, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_GE_4A NMATCH ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_nmatch, EA_SCALABLE, REG_P0, REG_P7, REG_V11, REG_V31, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector NoMatch(Vector mask, Vector left, Vector right) => NoMatch(mask, left, right); + + /// + /// svbool_t svnmatch[_u16](svbool_t pg, svuint16_t op1, svuint16_t op2) + /// NMATCH Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GE_4A NMATCH ., /Z, ., . 
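+
+ // Usage sketch (editor's addition, illustrative only; Sve2 assumed, generics reconstructed
+ // from the svmullt[_u64] prototype above). A full-width widening multiply pairs the bottom
+ // (even-lane) and top (odd-lane) forms:
+ static (Vector<ulong> Even, Vector<ulong> Odd) WidenAllProducts(Vector<uint> a, Vector<uint> b)
+     => (Sve2.MultiplyWideningLower(a, b),   // UMULLB Zd.D, Za.S, Zb.S
+         Sve2.MultiplyWideningUpper(a, b));  // UMULLT Zd.D, Za.S, Zb.S
+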
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_nmatch, EA_SCALABLE, REG_P0, REG_P7, REG_V11, REG_V31, INS_OPTS_SCALABLE_H);
+ ///
+ public static unsafe Vector<ushort> NoMatch(Vector<ushort> mask, Vector<ushort> left, Vector<ushort> right) => NoMatch(mask, left, right);
+
+
+ /// PolynomialMultiply : Polynomial multiply
+
+ ///
+ /// svuint8_t svpmul[_u8](svuint8_t op1, svuint8_t op2)
+ /// PMUL Zresult.B, Zop1.B, Zop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BD_3B PMUL <Zd>.B, <Zn>.B, <Zm>.B
+ /// theEmitter->emitIns_R_R_R(INS_sve_pmul, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector<byte> PolynomialMultiply(Vector<byte> left, Vector<byte> right) => PolynomialMultiply(left, right);
+
+
+ /// PolynomialMultiplyWideningLower : Polynomial multiply long (bottom)
+
+ ///
+ /// svuint8_t svpmullb_pair[_u8](svuint8_t op1, svuint8_t op2)
+ /// PMULLB Zresult.H, Zop1.B, Zop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FN_3A PMULLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ /// theEmitter->emitIns_R_R_R(INS_sve_pmullb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FN_3B PMULLB <Zd>.Q, <Zn>.D, <Zm>.D
+ /// theEmitter->emitIns_R_R_R(INS_sve_pmullb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q);
+ ///
+ public static unsafe Vector<byte> PolynomialMultiplyWideningLower(Vector<byte> left, Vector<byte> right) => PolynomialMultiplyWideningLower(left, right);
+
+ ///
+ /// svuint16_t svpmullb[_u16](svuint8_t op1, svuint8_t op2)
+ /// PMULLB Zresult.H, Zop1.B, Zop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FN_3A PMULLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ /// theEmitter->emitIns_R_R_R(INS_sve_pmullb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FN_3B PMULLB <Zd>.Q, <Zn>.D, <Zm>.D
+ /// theEmitter->emitIns_R_R_R(INS_sve_pmullb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q);
+ ///
+ public static unsafe Vector<ushort> PolynomialMultiplyWideningLower(Vector<byte> left, Vector<byte> right) => PolynomialMultiplyWideningLower(left, right);
+
+ ///
+ /// svuint32_t svpmullb_pair[_u32](svuint32_t op1, svuint32_t op2)
+ /// PMULLB Zresult.D, Zop1.S, Zop2.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FN_3A PMULLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ /// theEmitter->emitIns_R_R_R(INS_sve_pmullb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FN_3B PMULLB <Zd>.Q, <Zn>.D, <Zm>.D
+ /// theEmitter->emitIns_R_R_R(INS_sve_pmullb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q);
+ ///
+ public static unsafe Vector<uint> PolynomialMultiplyWideningLower(Vector<uint> left, Vector<uint> right) => PolynomialMultiplyWideningLower(left, right);
+
+ ///
+ /// svuint64_t svpmullb[_u64](svuint32_t op1, svuint32_t op2)
+ /// PMULLB Zresult.D, Zop1.S, Zop2.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FN_3A PMULLB <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
+ /// theEmitter->emitIns_R_R_R(INS_sve_pmullb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FN_3B PMULLB <Zd>.Q, <Zn>.D, <Zm>.D
+ /// theEmitter->emitIns_R_R_R(INS_sve_pmullb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q);
+ ///
+ public static unsafe Vector<ulong> PolynomialMultiplyWideningLower(Vector<uint> left, Vector<uint> right) => PolynomialMultiplyWideningLower(left, right);
+
+
+ /// PolynomialMultiplyWideningUpper : Polynomial multiply long (top)
+
+ ///
+ /// svuint8_t svpmullt_pair[_u8](svuint8_t op1, svuint8_t op2)
+ /// PMULLT Zresult.H, Zop1.B, Zop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FN_3A PMULLT <Zd>.<T>, <Zn>.<Tb>, <Zm>.<Tb>
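+
+ // Usage sketch (editor's addition, illustrative only; Sve2 assumed, generics reconstructed
+ // from the svpmullb[_u64] prototype above). A carry-less (GF(2)) multiply of the even
+ // 32-bit lanes into 64-bit results, the building block of CRC-folding style routines:
+ static Vector<ulong> CarrylessMulEven(Vector<uint> a, Vector<uint> b)
+     => Sve2.PolynomialMultiplyWideningLower(a, b); // PMULLB Zd.D, Za.S, Zb.S
+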
+ /// theEmitter->emitIns_R_R_R(INS_sve_pmullt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_FN_3B PMULLT .Q, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_pmullt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q); + /// + public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, Vector right) => PolynomialMultiplyWideningUpper(left, right); + + /// + /// svuint16_t svpmullt[_u16](svuint8_t op1, svuint8_t op2) + /// PMULLT Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FN_3A PMULLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_pmullt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_FN_3B PMULLT .Q, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_pmullt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q); + /// + public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, Vector right) => PolynomialMultiplyWideningUpper(left, right); + + /// + /// svuint32_t svpmullt_pair[_u32](svuint32_t op1, svuint32_t op2) + /// PMULLT Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FN_3A PMULLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_pmullt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_FN_3B PMULLT .Q, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_pmullt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q); + /// + public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, Vector right) => PolynomialMultiplyWideningUpper(left, right); + + /// + /// svuint64_t svpmullt[_u64](svuint32_t op1, svuint32_t op2) + /// PMULLT Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FN_3A PMULLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_pmullt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_FN_3B PMULLT .Q, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_pmullt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q); + /// + public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, Vector right) => PolynomialMultiplyWideningUpper(left, right); + + + /// ReciprocalEstimate : Reciprocal estimate + + /// + /// svuint32_t svrecpe[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// URECPE Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; URECPE Zresult.S, Pg/M, Zop.S + /// svuint32_t svrecpe[_u32]_x(svbool_t pg, svuint32_t op) + /// URECPE Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; URECPE Zresult.S, Pg/M, Zop.S + /// svuint32_t svrecpe[_u32]_z(svbool_t pg, svuint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; URECPE Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_ES_3A URECPE .S, /M, .S + /// theEmitter->emitIns_R_R_R(INS_sve_urecpe, EA_SCALABLE, REG_V2, REG_P3, REG_V4, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReciprocalEstimate(Vector value) => ReciprocalEstimate(value); + + + /// ReciprocalSqrtEstimate : Reciprocal square root estimate + + /// + /// svuint32_t svrsqrte[_u32]_m(svuint32_t inactive, svbool_t pg, svuint32_t op) + /// URSQRTE Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; URSQRTE Zresult.S, Pg/M, Zop.S + /// svuint32_t svrsqrte[_u32]_x(svbool_t pg, svuint32_t op) + /// URSQRTE Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; URSQRTE Zresult.S, Pg/M, Zop.S + /// svuint32_t svrsqrte[_u32]_z(svbool_t pg, svuint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; URSQRTE Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_ES_3A URSQRTE .S, 
/M, .S + /// theEmitter->emitIns_R_R_R(INS_sve_ursqrte, EA_SCALABLE, REG_V3, REG_P0, REG_V5, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReciprocalSqrtEstimate(Vector value) => ReciprocalSqrtEstimate(value); + + + /// RoundingAddHighNarowingLower : Rounding add narrow high part (bottom) + + /// + /// svint8_t svraddhnb[_s16](svint16_t op1, svint16_t op2) + /// RADDHNB Zresult.B, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RADDHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_raddhnb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector RoundingAddHighNarowingLower(Vector left, Vector right) => RoundingAddHighNarowingLower(left, right); + + /// + /// svint16_t svraddhnb[_s32](svint32_t op1, svint32_t op2) + /// RADDHNB Zresult.H, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RADDHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_raddhnb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector RoundingAddHighNarowingLower(Vector left, Vector right) => RoundingAddHighNarowingLower(left, right); + + /// + /// svint32_t svraddhnb[_s64](svint64_t op1, svint64_t op2) + /// RADDHNB Zresult.S, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RADDHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_raddhnb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector RoundingAddHighNarowingLower(Vector left, Vector right) => RoundingAddHighNarowingLower(left, right); + + /// + /// svuint8_t svraddhnb[_u16](svuint16_t op1, svuint16_t op2) + /// RADDHNB Zresult.B, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RADDHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_raddhnb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector RoundingAddHighNarowingLower(Vector left, Vector right) => RoundingAddHighNarowingLower(left, right); + + /// + /// svuint16_t svraddhnb[_u32](svuint32_t op1, svuint32_t op2) + /// RADDHNB Zresult.H, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RADDHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_raddhnb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector RoundingAddHighNarowingLower(Vector left, Vector right) => RoundingAddHighNarowingLower(left, right); + + /// + /// svuint32_t svraddhnb[_u64](svuint64_t op1, svuint64_t op2) + /// RADDHNB Zresult.S, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RADDHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_raddhnb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector RoundingAddHighNarowingLower(Vector left, Vector right) => RoundingAddHighNarowingLower(left, right); + + + /// RoundingAddHighNarowingUpper : Rounding add narrow high part (top) + + /// + /// svint8_t svraddhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2) + /// RADDHNT Ztied.B, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RADDHNT ., ., . 
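+
+ // Usage sketch (editor's addition, illustrative only; Sve2 assumed, generics reconstructed
+ // from the svraddhnb[_u64] prototype above). Keeps the rounded high half of each 64-bit sum,
+ // i.e. (a + b + (1UL << 31)) >> 32 per lane, narrowed into the even 32-bit result lanes:
+ static Vector<uint> RoundedHighSums(Vector<ulong> a, Vector<ulong> b)
+     => Sve2.RoundingAddHighNarowingLower(a, b); // RADDHNB Zd.S, Za.D, Zb.D
+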
+ /// theEmitter->emitIns_R_R_R(INS_sve_raddhnt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector RoundingAddHighNarowingUpper(Vector even, Vector left, Vector right) => RoundingAddHighNarowingUpper(even, left, right); + + /// + /// svint16_t svraddhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2) + /// RADDHNT Ztied.H, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RADDHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_raddhnt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector RoundingAddHighNarowingUpper(Vector even, Vector left, Vector right) => RoundingAddHighNarowingUpper(even, left, right); + + /// + /// svint32_t svraddhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2) + /// RADDHNT Ztied.S, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RADDHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_raddhnt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector RoundingAddHighNarowingUpper(Vector even, Vector left, Vector right) => RoundingAddHighNarowingUpper(even, left, right); + + /// + /// svuint8_t svraddhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2) + /// RADDHNT Ztied.B, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RADDHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_raddhnt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector RoundingAddHighNarowingUpper(Vector even, Vector left, Vector right) => RoundingAddHighNarowingUpper(even, left, right); + + /// + /// svuint16_t svraddhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2) + /// RADDHNT Ztied.H, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RADDHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_raddhnt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector RoundingAddHighNarowingUpper(Vector even, Vector left, Vector right) => RoundingAddHighNarowingUpper(even, left, right); + + /// + /// svuint32_t svraddhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2) + /// RADDHNT Ztied.S, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RADDHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_raddhnt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector RoundingAddHighNarowingUpper(Vector even, Vector left, Vector right) => RoundingAddHighNarowingUpper(even, left, right); + + + /// RoundingHalvingAdd : Rounding halving add + + /// + /// svint8_t svrhadd[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2) + /// SRHADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; SRHADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svrhadd[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2) + /// SRHADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// SRHADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; SRHADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svint8_t svrhadd[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; SRHADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; SRHADD Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_EP_3A SRHADD ., /M, ., . 
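+
+ // Usage sketch (editor's addition, illustrative only; Sve2 assumed, generics reconstructed
+ // from the svraddhnt[_u32] prototype above). The top form writes rounded high halves into
+ // the odd lanes of `even`, so bottom + top together interleave a fully narrowed result:
+ static Vector<ushort> NarrowAllSums(Vector<uint> a, Vector<uint> b)
+ {
+     Vector<ushort> even = Sve2.RoundingAddHighNarowingLower(a, b); // RADDHNB -> even lanes
+     return Sve2.RoundingAddHighNarowingUpper(even, a, b);          // RADDHNT -> odd lanes
+ }
+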
+ /// theEmitter->emitIns_R_R_R(INS_sve_srhadd, EA_SCALABLE, REG_V18, REG_P3, REG_V13, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundingHalvingAdd(Vector left, Vector right) => RoundingHalvingAdd(left, right); + + /// + /// svint16_t svrhadd[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2) + /// SRHADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SRHADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svrhadd[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2) + /// SRHADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// SRHADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; SRHADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svint16_t svrhadd[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; SRHADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; SRHADD Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_EP_3A SRHADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_srhadd, EA_SCALABLE, REG_V18, REG_P3, REG_V13, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundingHalvingAdd(Vector left, Vector right) => RoundingHalvingAdd(left, right); + + /// + /// svint32_t svrhadd[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2) + /// SRHADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; SRHADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svrhadd[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2) + /// SRHADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// SRHADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; SRHADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svint32_t svrhadd[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; SRHADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; SRHADD Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_EP_3A SRHADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_srhadd, EA_SCALABLE, REG_V18, REG_P3, REG_V13, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundingHalvingAdd(Vector left, Vector right) => RoundingHalvingAdd(left, right); + + /// + /// svint64_t svrhadd[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2) + /// SRHADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; SRHADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svrhadd[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2) + /// SRHADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// SRHADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; SRHADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svint64_t svrhadd[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SRHADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; SRHADD Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_EP_3A SRHADD ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_srhadd, EA_SCALABLE, REG_V18, REG_P3, REG_V13, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundingHalvingAdd(Vector left, Vector right) => RoundingHalvingAdd(left, right); + + /// + /// svuint8_t svrhadd[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// URHADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; URHADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svrhadd[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// URHADD Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// URHADD Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; URHADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svrhadd[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; URHADD Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; URHADD Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_EP_3A URHADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_urhadd, EA_SCALABLE, REG_V22, REG_P7, REG_V17, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundingHalvingAdd(Vector left, Vector right) => RoundingHalvingAdd(left, right); + + /// + /// svuint16_t svrhadd[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// URHADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; URHADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svrhadd[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// URHADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// URHADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; URHADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svrhadd[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; URHADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; URHADD Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_EP_3A URHADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_urhadd, EA_SCALABLE, REG_V22, REG_P7, REG_V17, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundingHalvingAdd(Vector left, Vector right) => RoundingHalvingAdd(left, right); + + /// + /// svuint32_t svrhadd[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// URHADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; URHADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svrhadd[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// URHADD Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// URHADD Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; URHADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svrhadd[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; URHADD Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; URHADD Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_EP_3A URHADD ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_urhadd, EA_SCALABLE, REG_V22, REG_P7, REG_V17, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundingHalvingAdd(Vector left, Vector right) => RoundingHalvingAdd(left, right); + + /// + /// svuint64_t svrhadd[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// URHADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; URHADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svrhadd[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// URHADD Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// URHADD Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; URHADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svrhadd[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; URHADD Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; URHADD Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_EP_3A URHADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_urhadd, EA_SCALABLE, REG_V22, REG_P7, REG_V17, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundingHalvingAdd(Vector left, Vector right) => RoundingHalvingAdd(left, right); + + + /// RoundingSubtractHighNarowingLower : Rounding subtract narrow high part (bottom) + + /// + /// svint8_t svrsubhnb[_s16](svint16_t op1, svint16_t op2) + /// RSUBHNB Zresult.B, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RSUBHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_rsubhnb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector RoundingSubtractHighNarowingLower(Vector left, Vector right) => RoundingSubtractHighNarowingLower(left, right); + + /// + /// svint16_t svrsubhnb[_s32](svint32_t op1, svint32_t op2) + /// RSUBHNB Zresult.H, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RSUBHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_rsubhnb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector RoundingSubtractHighNarowingLower(Vector left, Vector right) => RoundingSubtractHighNarowingLower(left, right); + + /// + /// svint32_t svrsubhnb[_s64](svint64_t op1, svint64_t op2) + /// RSUBHNB Zresult.S, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RSUBHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_rsubhnb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector RoundingSubtractHighNarowingLower(Vector left, Vector right) => RoundingSubtractHighNarowingLower(left, right); + + /// + /// svuint8_t svrsubhnb[_u16](svuint16_t op1, svuint16_t op2) + /// RSUBHNB Zresult.B, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RSUBHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_rsubhnb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector RoundingSubtractHighNarowingLower(Vector left, Vector right) => RoundingSubtractHighNarowingLower(left, right); + + /// + /// svuint16_t svrsubhnb[_u32](svuint32_t op1, svuint32_t op2) + /// RSUBHNB Zresult.H, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RSUBHNB ., ., . 
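+
+ // Usage sketch (editor's addition, illustrative only; Sve2 assumed, Vector<byte> generics
+ // reconstructed from the svrhadd[_u8] prototype earlier in this section). A rounded average,
+ // (a + b + 1) >> 1, computed at full precision per lane so the addition cannot overflow:
+ static Vector<byte> RoundedAverage(Vector<byte> a, Vector<byte> b)
+     => Sve2.RoundingHalvingAdd(a, b); // URHADD Za.B, Pg/M, Za.B, Zb.B
+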
+ /// theEmitter->emitIns_R_R_R(INS_sve_rsubhnb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector RoundingSubtractHighNarowingLower(Vector left, Vector right) => RoundingSubtractHighNarowingLower(left, right); + + /// + /// svuint32_t svrsubhnb[_u64](svuint64_t op1, svuint64_t op2) + /// RSUBHNB Zresult.S, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RSUBHNB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_rsubhnb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector RoundingSubtractHighNarowingLower(Vector left, Vector right) => RoundingSubtractHighNarowingLower(left, right); + + + /// RoundingSubtractHighNarowingUpper : Rounding subtract narrow high part (top) + + /// + /// svint8_t svrsubhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2) + /// RSUBHNT Ztied.B, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RSUBHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_rsubhnt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector RoundingSubtractHighNarowingUpper(Vector even, Vector left, Vector right) => RoundingSubtractHighNarowingUpper(even, left, right); + + /// + /// svint16_t svrsubhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2) + /// RSUBHNT Ztied.H, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RSUBHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_rsubhnt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector RoundingSubtractHighNarowingUpper(Vector even, Vector left, Vector right) => RoundingSubtractHighNarowingUpper(even, left, right); + + /// + /// svint32_t svrsubhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2) + /// RSUBHNT Ztied.S, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RSUBHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_rsubhnt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector RoundingSubtractHighNarowingUpper(Vector even, Vector left, Vector right) => RoundingSubtractHighNarowingUpper(even, left, right); + + /// + /// svuint8_t svrsubhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2) + /// RSUBHNT Ztied.B, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RSUBHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_rsubhnt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector RoundingSubtractHighNarowingUpper(Vector even, Vector left, Vector right) => RoundingSubtractHighNarowingUpper(even, left, right); + + /// + /// svuint16_t svrsubhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2) + /// RSUBHNT Ztied.H, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RSUBHNT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_rsubhnt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector RoundingSubtractHighNarowingUpper(Vector even, Vector left, Vector right) => RoundingSubtractHighNarowingUpper(even, left, right); + + /// + /// svuint32_t svrsubhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2) + /// RSUBHNT Ztied.S, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GC_3A RSUBHNT ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_rsubhnt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector RoundingSubtractHighNarowingUpper(Vector even, Vector left, Vector right) => RoundingSubtractHighNarowingUpper(even, left, right); + + + /// SaturatingAbs : Saturating absolute value + + /// + /// svint8_t svqabs[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op) + /// SQABS Ztied.B, Pg/M, Zop.B + /// MOVPRFX Zresult, Zinactive; SQABS Zresult.B, Pg/M, Zop.B + /// svint8_t svqabs[_s8]_x(svbool_t pg, svint8_t op) + /// SQABS Ztied.B, Pg/M, Ztied.B + /// MOVPRFX Zresult, Zop; SQABS Zresult.B, Pg/M, Zop.B + /// svint8_t svqabs[_s8]_z(svbool_t pg, svint8_t op) + /// MOVPRFX Zresult.B, Pg/Z, Zop.B; SQABS Zresult.B, Pg/M, Zop.B + /// + /// codegenarm64test: + /// IF_SVE_ES_3A SQABS ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_sqabs, EA_SCALABLE, REG_V29, REG_P7, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SaturatingAbs(Vector value) => SaturatingAbs(value); + + /// + /// svint16_t svqabs[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op) + /// SQABS Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; SQABS Zresult.H, Pg/M, Zop.H + /// svint16_t svqabs[_s16]_x(svbool_t pg, svint16_t op) + /// SQABS Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; SQABS Zresult.H, Pg/M, Zop.H + /// svint16_t svqabs[_s16]_z(svbool_t pg, svint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; SQABS Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_ES_3A SQABS ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_sqabs, EA_SCALABLE, REG_V29, REG_P7, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SaturatingAbs(Vector value) => SaturatingAbs(value); + + /// + /// svint32_t svqabs[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op) + /// SQABS Ztied.S, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; SQABS Zresult.S, Pg/M, Zop.S + /// svint32_t svqabs[_s32]_x(svbool_t pg, svint32_t op) + /// SQABS Ztied.S, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; SQABS Zresult.S, Pg/M, Zop.S + /// svint32_t svqabs[_s32]_z(svbool_t pg, svint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; SQABS Zresult.S, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_ES_3A SQABS ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_sqabs, EA_SCALABLE, REG_V29, REG_P7, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SaturatingAbs(Vector value) => SaturatingAbs(value); + + /// + /// svint64_t svqabs[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// SQABS Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; SQABS Zresult.D, Pg/M, Zop.D + /// svint64_t svqabs[_s64]_x(svbool_t pg, svint64_t op) + /// SQABS Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; SQABS Zresult.D, Pg/M, Zop.D + /// svint64_t svqabs[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; SQABS Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_ES_3A SQABS ., /M, . 
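+
+ // Usage sketch (editor's addition, illustrative only; Sve2 assumed, Vector<sbyte> generics
+ // reconstructed from the svqabs[_s8] prototype above). Unlike a wrapping Abs, SQABS saturates
+ // the one problematic input: sbyte.MinValue (-128) becomes sbyte.MaxValue (127):
+ static Vector<sbyte> SaturatedAbs(Vector<sbyte> v)
+     => Sve2.SaturatingAbs(v); // SQABS Zd.B, Pg/M, Zv.B
+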
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqabs, EA_SCALABLE, REG_V29, REG_P7, REG_V0, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SaturatingAbs(Vector value) => SaturatingAbs(value); + + + /// SaturatingComplexAddRotate : Saturating complex add with rotate + + /// + /// svint8_t svqcadd[_s8](svint8_t op1, svint8_t op2, uint64_t imm_rotation) + /// SQCADD Ztied1.B, Ztied1.B, Zop2.B, #imm_rotation + /// MOVPRFX Zresult, Zop1; SQCADD Zresult.B, Zresult.B, Zop2.B, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_FV_2A SQCADD ., ., ., + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V8, REG_V9, 270, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V12, REG_V13, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V14, REG_V15, 90, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingComplexAddRotate(Vector op1, Vector op2, [ConstantExpected] byte rotation) => SaturatingComplexAddRotate(op1, op2, rotation); + + /// + /// svint16_t svqcadd[_s16](svint16_t op1, svint16_t op2, uint64_t imm_rotation) + /// SQCADD Ztied1.H, Ztied1.H, Zop2.H, #imm_rotation + /// MOVPRFX Zresult, Zop1; SQCADD Zresult.H, Zresult.H, Zop2.H, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_FV_2A SQCADD ., ., ., + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V8, REG_V9, 270, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V12, REG_V13, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V14, REG_V15, 90, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingComplexAddRotate(Vector op1, Vector op2, [ConstantExpected] byte rotation) => SaturatingComplexAddRotate(op1, op2, rotation); + + /// + /// svint32_t svqcadd[_s32](svint32_t op1, svint32_t op2, uint64_t imm_rotation) + /// SQCADD Ztied1.S, Ztied1.S, Zop2.S, #imm_rotation + /// MOVPRFX Zresult, Zop1; SQCADD Zresult.S, Zresult.S, Zop2.S, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_FV_2A SQCADD ., ., ., + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V8, REG_V9, 270, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V12, REG_V13, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V14, REG_V15, 90, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingComplexAddRotate(Vector op1, Vector op2, [ConstantExpected] byte rotation) => SaturatingComplexAddRotate(op1, op2, rotation); + + /// + /// svint64_t svqcadd[_s64](svint64_t op1, svint64_t op2, uint64_t imm_rotation) + /// SQCADD Ztied1.D, Ztied1.D, Zop2.D, #imm_rotation + /// MOVPRFX Zresult, Zop1; SQCADD Zresult.D, Zresult.D, Zop2.D, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_FV_2A SQCADD ., ., ., + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V8, REG_V9, 270, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V10, REG_V11, 270, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V12, REG_V13, 90, 
INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqcadd, EA_SCALABLE, REG_V14, REG_V15, 90, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingComplexAddRotate(Vector op1, Vector op2, [ConstantExpected] byte rotation) => SaturatingComplexAddRotate(op1, op2, rotation); + + + /// SaturatingDoublingMultiplyAddWideningLower : Saturating doubling multiply-add long (bottom) + + /// + /// svint16_t svqdmlalb[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// SQDMLALB Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; SQDMLALB Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3A SQDMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplyAddWideningLower(op1, op2, op3); + + /// + /// svint32_t svqdmlalb[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// SQDMLALB Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; SQDMLALB Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3A SQDMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplyAddWideningLower(op1, op2, op3); + + /// + /// svint32_t svqdmlalb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// SQDMLALB Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; SQDMLALB Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3A SQDMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => SaturatingDoublingMultiplyAddWideningLower(op1, op2, op3, imm_index); + + /// + /// svint64_t svqdmlalb[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// SQDMLALB Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; SQDMLALB Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLALB ., ., . 
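+
+ // Usage sketch (editor's addition, illustrative only; Sve2 assumed, Vector<long> generics
+ // reconstructed from the svqcadd[_s64] prototype above). rotation is a constant literal of
+ // 90 or 270 (the values the emitter tests above use); with 90, each (real, imag) pair of a
+ // gains (-b.imag, +b.real), saturating on overflow:
+ static Vector<long> ComplexAdd90(Vector<long> a, Vector<long> b)
+     => Sve2.SaturatingComplexAddRotate(a, b, 90); // SQCADD Za.D, Za.D, Zb.D, #90
+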
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3A SQDMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplyAddWideningLower(op1, op2, op3); + + /// + /// svint64_t svqdmlalb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// SQDMLALB Ztied1.D, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; SQDMLALB Zresult.D, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLALB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3A SQDMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLALB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => SaturatingDoublingMultiplyAddWideningLower(op1, op2, op3, imm_index); + + + /// SaturatingDoublingMultiplyAddWideningLowerUpper : Saturating doubling multiply-add long (bottom × top) + + /// + /// svint16_t svqdmlalbt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// SQDMLALBT Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; SQDMLALBT Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EN_3A SQDMLALBT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlalbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLowerUpper(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplyAddWideningLowerUpper(op1, op2, op3); + + /// + /// svint32_t svqdmlalbt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// SQDMLALBT Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; SQDMLALBT Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EN_3A SQDMLALBT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlalbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLowerUpper(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplyAddWideningLowerUpper(op1, op2, op3); + + /// + /// svint64_t svqdmlalbt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// SQDMLALBT Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; SQDMLALBT Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_EN_3A SQDMLALBT ., ., . 
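+
+ // Usage sketch (editor's addition, illustrative only; Sve2 assumed, generics reconstructed
+ // from the svqdmlalb[_s64] prototype above). Accumulates saturate(2 * a_even * b_even),
+ // widened from 32 to 64 bits, into acc; both the doubling and the add saturate:
+ static Vector<long> DoublingMacEven(Vector<long> acc, Vector<int> a, Vector<int> b)
+     => Sve2.SaturatingDoublingMultiplyAddWideningLower(acc, a, b); // SQDMLALB Zacc.D, Za.S, Zb.S
+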
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlalbt, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector SaturatingDoublingMultiplyAddWideningLowerUpper(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplyAddWideningLowerUpper(op1, op2, op3); + + + /// SaturatingDoublingMultiplyAddWideningUpper : Saturating doubling multiply-add long (top) + + /// + /// svint16_t svqdmlalt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// SQDMLALT Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; SQDMLALT Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_FJ_3A SQDMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplyAddWideningUpper(op1, op2, op3); + + /// + /// svint32_t svqdmlalt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// SQDMLALT Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; SQDMLALT Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_FJ_3A SQDMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplyAddWideningUpper(op1, op2, op3); + + /// + /// svint32_t svqdmlalt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// SQDMLALT Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; SQDMLALT Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_FJ_3A SQDMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => SaturatingDoublingMultiplyAddWideningUpper(op1, op2, op3, imm_index); + + /// + /// svint64_t svqdmlalt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// SQDMLALT Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; SQDMLALT Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLALT ., ., . 
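+ /// Usage sketch for SaturatingDoublingMultiplyAddWideningLowerUpper above
+ /// (illustrative only; Sve2 class name assumed):
+ ///   Vector<int> r = Sve2.SaturatingDoublingMultiplyAddWideningLowerUpper(acc, a, b);
+ ///   // result lane i: acc[i] + sat(2 * (int)a[2*i] * (int)b[2*i + 1]),
+ ///   // pairing the bottom lanes of op2 with the top lanes of op3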
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_FJ_3A SQDMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplyAddWideningUpper(op1, op2, op3); + + /// + /// svint64_t svqdmlalt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// SQDMLALT Ztied1.D, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; SQDMLALT Zresult.D, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLALT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_FJ_3A SQDMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLALT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => SaturatingDoublingMultiplyAddWideningUpper(op1, op2, op3, imm_index); + + + /// SaturatingDoublingMultiplyHigh : Saturating doubling multiply high + + /// + /// svint8_t svqdmulh[_s8](svint8_t op1, svint8_t op2) + /// SQDMULH Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_BE_3A SQDMULH ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmulh, EA_SCALABLE, REG_V7, REG_V28, REG_V0, INS_OPTS_SCALABLE_B); + /// IF_SVE_FI_3A SQDMULH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FI_3B SQDMULH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V10, REG_V11, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FI_3C SQDMULH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector left, Vector right) => SaturatingDoublingMultiplyHigh(left, right); + + /// + /// svint16_t svqdmulh[_s16](svint16_t op1, svint16_t op2) + /// SQDMULH Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BE_3A SQDMULH ., ., . 
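+ /// Usage sketch for the SaturatingDoublingMultiplyAddWideningUpper group above
+ /// (illustrative only; Sve2 class name assumed):
+ ///   Vector<int> r = Sve2.SaturatingDoublingMultiplyAddWideningUpper(acc, a, b);
+ ///   // same as the Lower form, but over the odd (top) source lanes:
+ ///   // acc[i] + sat(2 * (int)a[2*i + 1] * (int)b[2*i + 1])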
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmulh, EA_SCALABLE, REG_V7, REG_V28, REG_V0, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_FI_3A SQDMULH <Zd>.H, <Zn>.H, <Zm>.H[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FI_3B SQDMULH <Zd>.S, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V10, REG_V11, REG_V2, 1, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_FI_3C SQDMULH <Zd>.D, <Zn>.D, <Zm>.D[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector<short> SaturatingDoublingMultiplyHigh(Vector<short> left, Vector<short> right) => SaturatingDoublingMultiplyHigh(left, right);
+
+ ///
+ /// svint16_t svqdmulh_lane[_s16](svint16_t op1, svint16_t op2, uint64_t imm_index)
+ /// SQDMULH Zresult.H, Zop1.H, Zop2.H[imm_index]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BE_3A SQDMULH <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmulh, EA_SCALABLE, REG_V7, REG_V28, REG_V0, INS_OPTS_SCALABLE_B);
+ /// IF_SVE_FI_3A SQDMULH <Zd>.H, <Zn>.H, <Zm>.H[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_FI_3B SQDMULH <Zd>.S, <Zn>.S, <Zm>.S[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V10, REG_V11, REG_V2, 1, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_FI_3C SQDMULH <Zd>.D, <Zn>.D, <Zm>.D[<imm>]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector<short> SaturatingDoublingMultiplyHigh(Vector<short> op1, Vector<short> op2, ulong imm_index) => SaturatingDoublingMultiplyHigh(op1, op2, imm_index);
+
+ ///
+ /// svint32_t svqdmulh[_s32](svint32_t op1, svint32_t op2)
+ /// SQDMULH Zresult.S, Zop1.S, Zop2.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BE_3A SQDMULH <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmulh, EA_SCALABLE, REG_V7, REG_V28, REG_V0, INS_OPTS_SCALABLE_B); + /// IF_SVE_FI_3A SQDMULH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FI_3B SQDMULH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V10, REG_V11, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FI_3C SQDMULH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector left, Vector right) => SaturatingDoublingMultiplyHigh(left, right); + + /// + /// svint32_t svqdmulh_lane[_s32](svint32_t op1, svint32_t op2, uint64_t imm_index) + /// SQDMULH Zresult.S, Zop1.S, Zop2.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_BE_3A SQDMULH ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmulh, EA_SCALABLE, REG_V7, REG_V28, REG_V0, INS_OPTS_SCALABLE_B); + /// IF_SVE_FI_3A SQDMULH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FI_3B SQDMULH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V10, REG_V11, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FI_3C SQDMULH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector op1, Vector op2, ulong imm_index) => SaturatingDoublingMultiplyHigh(op1, op2, imm_index); + + /// + /// svint64_t svqdmulh[_s64](svint64_t op1, svint64_t op2) + /// SQDMULH Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BE_3A SQDMULH ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmulh, EA_SCALABLE, REG_V7, REG_V28, REG_V0, INS_OPTS_SCALABLE_B); + /// IF_SVE_FI_3A SQDMULH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FI_3B SQDMULH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V10, REG_V11, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FI_3C SQDMULH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector left, Vector right) => SaturatingDoublingMultiplyHigh(left, right); + + /// + /// svint64_t svqdmulh_lane[_s64](svint64_t op1, svint64_t op2, uint64_t imm_index) + /// SQDMULH Zresult.D, Zop1.D, Zop2.D[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_BE_3A SQDMULH ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmulh, EA_SCALABLE, REG_V7, REG_V28, REG_V0, INS_OPTS_SCALABLE_B); + /// IF_SVE_FI_3A SQDMULH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FI_3B SQDMULH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V10, REG_V11, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FI_3C SQDMULH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmulh, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingDoublingMultiplyHigh(Vector op1, Vector op2, ulong imm_index) => SaturatingDoublingMultiplyHigh(op1, op2, imm_index); + + + /// SaturatingDoublingMultiplySubtractWideningLower : Saturating doubling multiply-subtract long (bottom) + + /// + /// svint16_t svqdmlslb[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// SQDMLSLB Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; SQDMLSLB Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLSLB ., ., . 
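+ /// Usage sketch for the SaturatingDoublingMultiplyHigh group above
+ /// (illustrative only; Sve2 class name assumed):
+ ///   Vector<short> r = Sve2.SaturatingDoublingMultiplyHigh(a, b);
+ ///   // lane i: sat((2 * (int)a[i] * (int)b[i]) >> 16), the usual fixed-point
+ ///   // Q15 multiply; the lane-indexed overloads use b[imm_index] for every lane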
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// IF_SVE_FJ_3A SQDMLSLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLSLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V10, 2, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplySubtractWideningLower(op1, op2, op3); + + /// + /// svint32_t svqdmlslb[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// SQDMLSLB Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; SQDMLSLB Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLSLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// IF_SVE_FJ_3A SQDMLSLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLSLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V10, 2, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplySubtractWideningLower(op1, op2, op3); + + /// + /// svint32_t svqdmlslb_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// SQDMLSLB Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; SQDMLSLB Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLSLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// IF_SVE_FJ_3A SQDMLSLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLSLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V10, 2, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => SaturatingDoublingMultiplySubtractWideningLower(op1, op2, op3, imm_index); + + /// + /// svint64_t svqdmlslb[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// SQDMLSLB Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; SQDMLSLB Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLSLB ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// IF_SVE_FJ_3A SQDMLSLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLSLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V10, 2, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplySubtractWideningLower(op1, op2, op3); + + /// + /// svint64_t svqdmlslb_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// SQDMLSLB Ztied1.D, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; SQDMLSLB Zresult.D, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLSLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// IF_SVE_FJ_3A SQDMLSLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslb, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLSLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V10, 2, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => SaturatingDoublingMultiplySubtractWideningLower(op1, op2, op3, imm_index); + + + /// SaturatingDoublingMultiplySubtractWideningLowerUpper : Saturating doubling multiply-subtract long (bottom × top) + + /// + /// svint16_t svqdmlslbt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// SQDMLSLBT Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; SQDMLSLBT Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EN_3A SQDMLSLBT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslbt, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplySubtractWideningLowerUpper(op1, op2, op3); + + /// + /// svint32_t svqdmlslbt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// SQDMLSLBT Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; SQDMLSLBT Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EN_3A SQDMLSLBT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslbt, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplySubtractWideningLowerUpper(op1, op2, op3); + + /// + /// svint64_t svqdmlslbt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// SQDMLSLBT Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; SQDMLSLBT Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_EN_3A SQDMLSLBT ., ., . 
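+ /// Usage sketch for the SaturatingDoublingMultiplySubtractWideningLower group
+ /// above (illustrative only; Sve2 class name assumed):
+ ///   Vector<int> r = Sve2.SaturatingDoublingMultiplySubtractWideningLower(acc, a, b);
+ ///   // lane i: acc[i] - sat(2 * (int)a[2*i] * (int)b[2*i]), the subtracting
+ ///   // counterpart of SQDMLALB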
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslbt, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningLowerUpper(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplySubtractWideningLowerUpper(op1, op2, op3); + + + /// SaturatingDoublingMultiplySubtractWideningUpper : Saturating doubling multiply-subtract long (top) + + /// + /// svint16_t svqdmlslt[_s16](svint16_t op1, svint8_t op2, svint8_t op3) + /// SQDMLSLT Ztied1.H, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; SQDMLSLT Zresult.H, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLSLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3A SQDMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslt, EA_SCALABLE, REG_V6, REG_V0, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V15, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplySubtractWideningUpper(op1, op2, op3); + + /// + /// svint32_t svqdmlslt[_s32](svint32_t op1, svint16_t op2, svint16_t op3) + /// SQDMLSLT Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; SQDMLSLT Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLSLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3A SQDMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslt, EA_SCALABLE, REG_V6, REG_V0, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V15, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplySubtractWideningUpper(op1, op2, op3); + + /// + /// svint32_t svqdmlslt_lane[_s32](svint32_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// SQDMLSLT Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; SQDMLSLT Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLSLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3A SQDMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslt, EA_SCALABLE, REG_V6, REG_V0, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V15, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => SaturatingDoublingMultiplySubtractWideningUpper(op1, op2, op3, imm_index); + + /// + /// svint64_t svqdmlslt[_s64](svint64_t op1, svint32_t op2, svint32_t op3) + /// SQDMLSLT Ztied1.D, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; SQDMLSLT Zresult.D, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLSLT ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3A SQDMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslt, EA_SCALABLE, REG_V6, REG_V0, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V15, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) => SaturatingDoublingMultiplySubtractWideningUpper(op1, op2, op3); + + /// + /// svint64_t svqdmlslt_lane[_s64](svint64_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// SQDMLSLT Ztied1.D, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; SQDMLSLT Zresult.D, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EO_3A SQDMLSLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmlslt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3A SQDMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslt, EA_SCALABLE, REG_V6, REG_V0, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FJ_3B SQDMLSLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V15, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => SaturatingDoublingMultiplySubtractWideningUpper(op1, op2, op3, imm_index); + + + /// SaturatingDoublingMultiplyWideningLower : Saturating doubling multiply long (bottom) + + /// + /// svint16_t svqdmullb[_s16](svint8_t op1, svint8_t op2) + /// SQDMULLB Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SQDMULLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmullb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3A SQDMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3B SQDMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V0, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V4, REG_V6, REG_V5, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector left, Vector right) => SaturatingDoublingMultiplyWideningLower(left, right); + + /// + /// svint32_t svqdmullb[_s32](svint16_t op1, svint16_t op2) + /// SQDMULLB Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SQDMULLB ., ., . 
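+ /// Usage sketch for the SaturatingDoublingMultiplySubtractWideningUpper group
+ /// above (illustrative only; Sve2 class name assumed):
+ ///   Vector<int> r = Sve2.SaturatingDoublingMultiplySubtractWideningUpper(acc, a, b);
+ ///   // lane i: acc[i] - sat(2 * (int)a[2*i + 1] * (int)b[2*i + 1])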
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmullb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3A SQDMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3B SQDMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V0, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V4, REG_V6, REG_V5, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector left, Vector right) => SaturatingDoublingMultiplyWideningLower(left, right); + + /// + /// svint32_t svqdmullb_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index) + /// SQDMULLB Zresult.S, Zop1.H, Zop2.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SQDMULLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmullb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3A SQDMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3B SQDMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V0, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V4, REG_V6, REG_V5, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector op1, Vector op2, ulong imm_index) => SaturatingDoublingMultiplyWideningLower(op1, op2, imm_index); + + /// + /// svint64_t svqdmullb[_s64](svint32_t op1, svint32_t op2) + /// SQDMULLB Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SQDMULLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmullb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3A SQDMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3B SQDMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V0, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V4, REG_V6, REG_V5, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector left, Vector right) => SaturatingDoublingMultiplyWideningLower(left, right); + + /// + /// svint64_t svqdmullb_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index) + /// SQDMULLB Zresult.D, Zop1.S, Zop2.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SQDMULLB ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmullb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3A SQDMULLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3B SQDMULLB .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V0, REG_V2, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullb, EA_SCALABLE, REG_V4, REG_V6, REG_V5, 1, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningLower(Vector op1, Vector op2, ulong imm_index) => SaturatingDoublingMultiplyWideningLower(op1, op2, imm_index); + + + /// SaturatingDoublingMultiplyWideningUpper : Saturating doubling multiply long (top) + + /// + /// svint16_t svqdmullt[_s16](svint8_t op1, svint8_t op2) + /// SQDMULLT Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SQDMULLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmullt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D); + /// IF_SVE_FH_3A SQDMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3B SQDMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V8, REG_V10, REG_V10, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V12, REG_V14, REG_V15, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector left, Vector right) => SaturatingDoublingMultiplyWideningUpper(left, right); + + /// + /// svint32_t svqdmullt[_s32](svint16_t op1, svint16_t op2) + /// SQDMULLT Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SQDMULLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmullt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D); + /// IF_SVE_FH_3A SQDMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3B SQDMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V8, REG_V10, REG_V10, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V12, REG_V14, REG_V15, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector left, Vector right) => SaturatingDoublingMultiplyWideningUpper(left, right); + + /// + /// svint32_t svqdmullt_lane[_s32](svint16_t op1, svint16_t op2, uint64_t imm_index) + /// SQDMULLT Zresult.S, Zop1.H, Zop2.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SQDMULLT ., ., . 
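+ /// Usage sketch for the SaturatingDoublingMultiplyWideningLower group above
+ /// (illustrative only; Sve2 class name assumed):
+ ///   Vector<int> r = Sve2.SaturatingDoublingMultiplyWideningLower(a, b);
+ ///   // lane i: sat(2 * (int)a[2*i] * (int)b[2*i]); no accumulator, just the
+ ///   // widened doubling product of the even (bottom) lanes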
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmullt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D); + /// IF_SVE_FH_3A SQDMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3B SQDMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V8, REG_V10, REG_V10, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V12, REG_V14, REG_V15, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index) => SaturatingDoublingMultiplyWideningUpper(op1, op2, imm_index); + + /// + /// svint64_t svqdmullt[_s64](svint32_t op1, svint32_t op2) + /// SQDMULLT Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SQDMULLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqdmullt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D); + /// IF_SVE_FH_3A SQDMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3B SQDMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V8, REG_V10, REG_V10, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V12, REG_V14, REG_V15, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector left, Vector right) => SaturatingDoublingMultiplyWideningUpper(left, right); + + /// + /// svint64_t svqdmullt_lane[_s64](svint32_t op1, svint32_t op2, uint64_t imm_index) + /// SQDMULLT Zresult.D, Zop1.S, Zop2.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_FN_3A SQDMULLT ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqdmullt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D); + /// IF_SVE_FH_3A SQDMULLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FH_3B SQDMULLT .D, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V8, REG_V10, REG_V10, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqdmullt, EA_SCALABLE, REG_V12, REG_V14, REG_V15, 3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingDoublingMultiplyWideningUpper(Vector op1, Vector op2, ulong imm_index) => SaturatingDoublingMultiplyWideningUpper(op1, op2, imm_index); + + + /// SaturatingExtractNarrowingLower : Saturating extract narrow (bottom) + + /// + /// svint8_t svqxtnb[_s16](svint16_t op) + /// SQXTNB Zresult.B, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_GD_2A SQXTNB ., + /// theEmitter->emitIns_R_R(INS_sve_sqxtnb, EA_SCALABLE, REG_V0, REG_V5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqxtnb, EA_SCALABLE, REG_V0, REG_V0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqxtnb, EA_SCALABLE, REG_V7, REG_V7, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractNarrowingLower(Vector value) => SaturatingExtractNarrowingLower(value); + + /// + /// svint16_t svqxtnb[_s32](svint32_t op) + /// SQXTNB Zresult.H, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_GD_2A SQXTNB ., + /// theEmitter->emitIns_R_R(INS_sve_sqxtnb, EA_SCALABLE, REG_V0, REG_V5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqxtnb, EA_SCALABLE, REG_V0, REG_V0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqxtnb, EA_SCALABLE, REG_V7, REG_V7, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractNarrowingLower(Vector value) => SaturatingExtractNarrowingLower(value); + + /// + /// svint32_t svqxtnb[_s64](svint64_t op) + /// SQXTNB Zresult.S, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_GD_2A SQXTNB ., + /// theEmitter->emitIns_R_R(INS_sve_sqxtnb, EA_SCALABLE, REG_V0, REG_V5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqxtnb, EA_SCALABLE, REG_V0, REG_V0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqxtnb, EA_SCALABLE, REG_V7, REG_V7, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractNarrowingLower(Vector value) => SaturatingExtractNarrowingLower(value); + + /// + /// svuint8_t svqxtnb[_u16](svuint16_t op) + /// UQXTNB Zresult.B, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_GD_2A UQXTNB ., + /// theEmitter->emitIns_R_R(INS_sve_uqxtnb, EA_SCALABLE, REG_V0, REG_V7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqxtnb, EA_SCALABLE, REG_V6, REG_V2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqxtnb, EA_SCALABLE, REG_V3, REG_V9, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractNarrowingLower(Vector value) => SaturatingExtractNarrowingLower(value); + + /// + /// svuint16_t svqxtnb[_u32](svuint32_t op) + /// UQXTNB Zresult.H, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_GD_2A UQXTNB ., + /// theEmitter->emitIns_R_R(INS_sve_uqxtnb, EA_SCALABLE, REG_V0, REG_V7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqxtnb, EA_SCALABLE, REG_V6, REG_V2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqxtnb, EA_SCALABLE, REG_V3, 
REG_V9, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractNarrowingLower(Vector value) => SaturatingExtractNarrowingLower(value); + + /// + /// svuint32_t svqxtnb[_u64](svuint64_t op) + /// UQXTNB Zresult.S, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_GD_2A UQXTNB ., + /// theEmitter->emitIns_R_R(INS_sve_uqxtnb, EA_SCALABLE, REG_V0, REG_V7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqxtnb, EA_SCALABLE, REG_V6, REG_V2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqxtnb, EA_SCALABLE, REG_V3, REG_V9, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractNarrowingLower(Vector value) => SaturatingExtractNarrowingLower(value); + + + /// SaturatingExtractNarrowingUpper : Saturating extract narrow (top) + + /// + /// svint8_t svqxtnt[_s16](svint8_t even, svint16_t op) + /// SQXTNT Ztied.B, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_GD_2A SQXTNT ., + /// theEmitter->emitIns_R_R(INS_sve_sqxtnt, EA_SCALABLE, REG_V3, REG_V7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqxtnt, EA_SCALABLE, REG_V9, REG_V9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqxtnt, EA_SCALABLE, REG_V0, REG_V8, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractNarrowingUpper(Vector even, Vector op) => SaturatingExtractNarrowingUpper(even, op); + + /// + /// svint16_t svqxtnt[_s32](svint16_t even, svint32_t op) + /// SQXTNT Ztied.H, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_GD_2A SQXTNT ., + /// theEmitter->emitIns_R_R(INS_sve_sqxtnt, EA_SCALABLE, REG_V3, REG_V7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqxtnt, EA_SCALABLE, REG_V9, REG_V9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqxtnt, EA_SCALABLE, REG_V0, REG_V8, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractNarrowingUpper(Vector even, Vector op) => SaturatingExtractNarrowingUpper(even, op); + + /// + /// svint32_t svqxtnt[_s64](svint32_t even, svint64_t op) + /// SQXTNT Ztied.S, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_GD_2A SQXTNT ., + /// theEmitter->emitIns_R_R(INS_sve_sqxtnt, EA_SCALABLE, REG_V3, REG_V7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqxtnt, EA_SCALABLE, REG_V9, REG_V9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqxtnt, EA_SCALABLE, REG_V0, REG_V8, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractNarrowingUpper(Vector even, Vector op) => SaturatingExtractNarrowingUpper(even, op); + + /// + /// svuint8_t svqxtnt[_u16](svuint8_t even, svuint16_t op) + /// UQXTNT Ztied.B, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_GD_2A UQXTNT ., + /// theEmitter->emitIns_R_R(INS_sve_uqxtnt, EA_SCALABLE, REG_V0, REG_V1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqxtnt, EA_SCALABLE, REG_V0, REG_V1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqxtnt, EA_SCALABLE, REG_V0, REG_V3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractNarrowingUpper(Vector even, Vector op) => SaturatingExtractNarrowingUpper(even, op); + + /// + /// svuint16_t svqxtnt[_u32](svuint16_t even, svuint32_t op) + /// UQXTNT Ztied.H, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_GD_2A UQXTNT ., + /// theEmitter->emitIns_R_R(INS_sve_uqxtnt, EA_SCALABLE, REG_V0, REG_V1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqxtnt, EA_SCALABLE, REG_V0, REG_V1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqxtnt, EA_SCALABLE, REG_V0, REG_V3, 
INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractNarrowingUpper(Vector even, Vector op) => SaturatingExtractNarrowingUpper(even, op); + + /// + /// svuint32_t svqxtnt[_u64](svuint32_t even, svuint64_t op) + /// UQXTNT Ztied.S, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_GD_2A UQXTNT ., + /// theEmitter->emitIns_R_R(INS_sve_uqxtnt, EA_SCALABLE, REG_V0, REG_V1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_uqxtnt, EA_SCALABLE, REG_V0, REG_V1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_uqxtnt, EA_SCALABLE, REG_V0, REG_V3, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractNarrowingUpper(Vector even, Vector op) => SaturatingExtractNarrowingUpper(even, op); + + + /// SaturatingExtractUnsignedNarrowingLower : Saturating extract unsigned narrow (bottom) + + /// + /// svuint8_t svqxtunb[_s16](svint16_t op) + /// SQXTUNB Zresult.B, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_GD_2A SQXTUNB ., + /// theEmitter->emitIns_R_R(INS_sve_sqxtunb, EA_SCALABLE, REG_V0, REG_V8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqxtunb, EA_SCALABLE, REG_V0, REG_V6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqxtunb, EA_SCALABLE, REG_V0, REG_V9, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractUnsignedNarrowingLower(Vector value) => SaturatingExtractUnsignedNarrowingLower(value); + + /// + /// svuint16_t svqxtunb[_s32](svint32_t op) + /// SQXTUNB Zresult.H, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_GD_2A SQXTUNB ., + /// theEmitter->emitIns_R_R(INS_sve_sqxtunb, EA_SCALABLE, REG_V0, REG_V8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqxtunb, EA_SCALABLE, REG_V0, REG_V6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqxtunb, EA_SCALABLE, REG_V0, REG_V9, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractUnsignedNarrowingLower(Vector value) => SaturatingExtractUnsignedNarrowingLower(value); + + /// + /// svuint32_t svqxtunb[_s64](svint64_t op) + /// SQXTUNB Zresult.S, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_GD_2A SQXTUNB ., + /// theEmitter->emitIns_R_R(INS_sve_sqxtunb, EA_SCALABLE, REG_V0, REG_V8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqxtunb, EA_SCALABLE, REG_V0, REG_V6, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqxtunb, EA_SCALABLE, REG_V0, REG_V9, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractUnsignedNarrowingLower(Vector value) => SaturatingExtractUnsignedNarrowingLower(value); + + + /// SaturatingExtractUnsignedNarrowingUpper : Saturating extract unsigned narrow (top) + + /// + /// svuint8_t svqxtunt[_s16](svuint8_t even, svint16_t op) + /// SQXTUNT Ztied.B, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_GD_2A SQXTUNT ., + /// theEmitter->emitIns_R_R(INS_sve_sqxtunt, EA_SCALABLE, REG_V5, REG_V3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_sqxtunt, EA_SCALABLE, REG_V0, REG_V4, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_sqxtunt, EA_SCALABLE, REG_V0, REG_V4, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingExtractUnsignedNarrowingUpper(Vector even, Vector op) => SaturatingExtractUnsignedNarrowingUpper(even, op); + + /// + /// svuint16_t svqxtunt[_s32](svuint16_t even, svint32_t op) + /// SQXTUNT Ztied.H, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_GD_2A SQXTUNT ., + /// theEmitter->emitIns_R_R(INS_sve_sqxtunt, EA_SCALABLE, REG_V5, REG_V3, INS_OPTS_SCALABLE_B); + /// 
theEmitter->emitIns_R_R(INS_sve_sqxtunt, EA_SCALABLE, REG_V0, REG_V4, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R(INS_sve_sqxtunt, EA_SCALABLE, REG_V0, REG_V4, INS_OPTS_SCALABLE_S);
+ ///
+ public static unsafe Vector<ushort> SaturatingExtractUnsignedNarrowingUpper(Vector<ushort> even, Vector<int> op) => SaturatingExtractUnsignedNarrowingUpper(even, op);
+
+ ///
+ /// svuint32_t svqxtunt[_s64](svuint32_t even, svint64_t op)
+ /// SQXTUNT Ztied.S, Zop.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GD_2A SQXTUNT <Zd>.<T>, <Zn>.<Tb>
+ /// theEmitter->emitIns_R_R(INS_sve_sqxtunt, EA_SCALABLE, REG_V5, REG_V3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R(INS_sve_sqxtunt, EA_SCALABLE, REG_V0, REG_V4, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R(INS_sve_sqxtunt, EA_SCALABLE, REG_V0, REG_V4, INS_OPTS_SCALABLE_S);
+ ///
+ public static unsafe Vector<uint> SaturatingExtractUnsignedNarrowingUpper(Vector<uint> even, Vector<long> op) => SaturatingExtractUnsignedNarrowingUpper(even, op);
+
+
+ /// SaturatingNegate : Saturating negate
+
+ ///
+ /// svint8_t svqneg[_s8]_m(svint8_t inactive, svbool_t pg, svint8_t op)
+ /// SQNEG Ztied.B, Pg/M, Zop.B
+ /// MOVPRFX Zresult, Zinactive; SQNEG Zresult.B, Pg/M, Zop.B
+ /// svint8_t svqneg[_s8]_x(svbool_t pg, svint8_t op)
+ /// SQNEG Ztied.B, Pg/M, Ztied.B
+ /// MOVPRFX Zresult, Zop; SQNEG Zresult.B, Pg/M, Zop.B
+ /// svint8_t svqneg[_s8]_z(svbool_t pg, svint8_t op)
+ /// MOVPRFX Zresult.B, Pg/Z, Zop.B; SQNEG Zresult.B, Pg/M, Zop.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_ES_3A SQNEG <Zd>.<T>, <Pg>/M, <Zn>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqneg, EA_SCALABLE, REG_V31, REG_P6, REG_V1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqneg, EA_SCALABLE, REG_V0, REG_P5, REG_V2, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqneg, EA_SCALABLE, REG_V1, REG_P4, REG_V3, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector<sbyte> SaturatingNegate(Vector<sbyte> value) => SaturatingNegate(value);
+
+ ///
+ /// svint16_t svqneg[_s16]_m(svint16_t inactive, svbool_t pg, svint16_t op)
+ /// SQNEG Ztied.H, Pg/M, Zop.H
+ /// MOVPRFX Zresult, Zinactive; SQNEG Zresult.H, Pg/M, Zop.H
+ /// svint16_t svqneg[_s16]_x(svbool_t pg, svint16_t op)
+ /// SQNEG Ztied.H, Pg/M, Ztied.H
+ /// MOVPRFX Zresult, Zop; SQNEG Zresult.H, Pg/M, Zop.H
+ /// svint16_t svqneg[_s16]_z(svbool_t pg, svint16_t op)
+ /// MOVPRFX Zresult.H, Pg/Z, Zop.H; SQNEG Zresult.H, Pg/M, Zop.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_ES_3A SQNEG <Zd>.<T>, <Pg>/M, <Zn>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqneg, EA_SCALABLE, REG_V31, REG_P6, REG_V1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqneg, EA_SCALABLE, REG_V0, REG_P5, REG_V2, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqneg, EA_SCALABLE, REG_V1, REG_P4, REG_V3, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector<short> SaturatingNegate(Vector<short> value) => SaturatingNegate(value);
+
+ ///
+ /// svint32_t svqneg[_s32]_m(svint32_t inactive, svbool_t pg, svint32_t op)
+ /// SQNEG Ztied.S, Pg/M, Zop.S
+ /// MOVPRFX Zresult, Zinactive; SQNEG Zresult.S, Pg/M, Zop.S
+ /// svint32_t svqneg[_s32]_x(svbool_t pg, svint32_t op)
+ /// SQNEG Ztied.S, Pg/M, Ztied.S
+ /// MOVPRFX Zresult, Zop; SQNEG Zresult.S, Pg/M, Zop.S
+ /// svint32_t svqneg[_s32]_z(svbool_t pg, svint32_t op)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop.S; SQNEG Zresult.S, Pg/M, Zop.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_ES_3A SQNEG <Zd>.<T>, <Pg>/M, <Zn>.<T>
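+ /// Usage sketch for the saturating extract-narrow groups above (illustrative
+ /// only; Sve2 class name assumed). The Lower form fills the even lanes and
+ /// zeroes the odd lanes; the Upper form fills the odd lanes of an existing
+ /// vector, so the pair interleaves two wide vectors into one narrow result:
+ ///   Vector<short> lo = Sve2.SaturatingExtractNarrowingLower(w0);
+ ///   Vector<short> r  = Sve2.SaturatingExtractNarrowingUpper(lo, w1);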
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqneg, EA_SCALABLE, REG_V31, REG_P6, REG_V1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_sqneg, EA_SCALABLE, REG_V0, REG_P5, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sqneg, EA_SCALABLE, REG_V1, REG_P4, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SaturatingNegate(Vector value) => SaturatingNegate(value); + + /// + /// svint64_t svqneg[_s64]_m(svint64_t inactive, svbool_t pg, svint64_t op) + /// SQNEG Ztied.D, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; SQNEG Zresult.D, Pg/M, Zop.D + /// svint64_t svqneg[_s64]_x(svbool_t pg, svint64_t op) + /// SQNEG Ztied.D, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; SQNEG Zresult.D, Pg/M, Zop.D + /// svint64_t svqneg[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; SQNEG Zresult.D, Pg/M, Zop.D + /// + /// codegenarm64test: + /// IF_SVE_ES_3A SQNEG ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_sqneg, EA_SCALABLE, REG_V31, REG_P6, REG_V1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_sqneg, EA_SCALABLE, REG_V0, REG_P5, REG_V2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sqneg, EA_SCALABLE, REG_V1, REG_P4, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SaturatingNegate(Vector value) => SaturatingNegate(value); + + + /// SaturatingRoundingDoublingComplexMultiplyAddHighRotate : Saturating rounding doubling complex multiply-add high with rotate + + /// + /// svint8_t svqrdcmlah[_s8](svint8_t op1, svint8_t op2, svint8_t op3, uint64_t imm_rotation) + /// SQRDCMLAH Ztied1.B, Zop2.B, Zop3.B, #imm_rotation + /// MOVPRFX Zresult, Zop1; SQRDCMLAH Zresult.B, Zop2.B, Zop3.B, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A SQRDCMLAH ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V15, REG_V16, REG_V17, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V18, REG_V19, REG_V20, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V21, REG_V22, REG_V23, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FC_3A SQRDCMLAH .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FC_3B SQRDCMLAH .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, [ConstantExpected] 
byte rotation) => SaturatingRoundingDoublingComplexMultiplyAddHighRotate(op1, op2, op3, rotation); + + /// + /// svint16_t svqrdcmlah[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_rotation) + /// SQRDCMLAH Ztied1.H, Zop2.H, Zop3.H, #imm_rotation + /// MOVPRFX Zresult, Zop1; SQRDCMLAH Zresult.H, Zop2.H, Zop3.H, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A SQRDCMLAH ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V15, REG_V16, REG_V17, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V18, REG_V19, REG_V20, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V21, REG_V22, REG_V23, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FC_3A SQRDCMLAH .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FC_3B SQRDCMLAH .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation) => SaturatingRoundingDoublingComplexMultiplyAddHighRotate(op1, op2, op3, rotation); + + /// + /// svint16_t svqrdcmlah_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// SQRDCMLAH Ztied1.H, Zop2.H, Zop3.H[imm_index], #imm_rotation + /// MOVPRFX Zresult, Zop1; SQRDCMLAH Zresult.H, Zop2.H, Zop3.H[imm_index], #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A SQRDCMLAH ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V15, REG_V16, REG_V17, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V18, REG_V19, REG_V20, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V21, REG_V22, REG_V23, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FC_3A SQRDCMLAH .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, 
INS_OPTS_SCALABLE_H); + /// IF_SVE_FC_3B SQRDCMLAH .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation) => SaturatingRoundingDoublingComplexMultiplyAddHighRotate(op1, op2, op3, imm_index, rotation); + + /// + /// svint32_t svqrdcmlah[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_rotation) + /// SQRDCMLAH Ztied1.S, Zop2.S, Zop3.S, #imm_rotation + /// MOVPRFX Zresult, Zop1; SQRDCMLAH Zresult.S, Zop2.S, Zop3.S, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A SQRDCMLAH ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V15, REG_V16, REG_V17, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V18, REG_V19, REG_V20, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V21, REG_V22, REG_V23, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FC_3A SQRDCMLAH .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FC_3B SQRDCMLAH .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, [ConstantExpected] byte rotation) => SaturatingRoundingDoublingComplexMultiplyAddHighRotate(op1, op2, op3, rotation); + + /// + /// svint32_t svqrdcmlah_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// SQRDCMLAH Ztied1.S, Zop2.S, Zop3.S[imm_index], #imm_rotation + /// MOVPRFX Zresult, Zop1; SQRDCMLAH Zresult.S, Zop2.S, Zop3.S[imm_index], #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A SQRDCMLAH ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V15, REG_V16, REG_V17, 90, INS_OPTS_SCALABLE_H); + /// 
theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V18, REG_V19, REG_V20, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V21, REG_V22, REG_V23, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FC_3A SQRDCMLAH .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FC_3B SQRDCMLAH .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, ulong imm_index, [ConstantExpected] byte rotation) => SaturatingRoundingDoublingComplexMultiplyAddHighRotate(op1, op2, op3, imm_index, rotation); + + /// + /// svint64_t svqrdcmlah[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_rotation) + /// SQRDCMLAH Ztied1.D, Zop2.D, Zop3.D, #imm_rotation + /// MOVPRFX Zresult, Zop1; SQRDCMLAH Zresult.D, Zop2.D, Zop3.D, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_EK_3A SQRDCMLAH ., ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V12, REG_V13, REG_V14, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V15, REG_V16, REG_V17, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V18, REG_V19, REG_V20, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V21, REG_V22, REG_V23, 270, INS_OPTS_SCALABLE_D); + /// IF_SVE_FC_3A SQRDCMLAH .H, .H, .H[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V0, REG_V7, REG_V1, 3, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V2, REG_V5, REG_V3, 2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V4, REG_V3, REG_V5, 1, 180, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V6, REG_V1, REG_V7, 0, 270, INS_OPTS_SCALABLE_H); + /// IF_SVE_FC_3B SQRDCMLAH .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_sqrdcmlah, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SaturatingRoundingDoublingComplexMultiplyAddHighRotate(Vector op1, Vector op2, Vector op3, 
[ConstantExpected] byte rotation) => SaturatingRoundingDoublingComplexMultiplyAddHighRotate(op1, op2, op3, rotation);
+
+
+ /// SaturatingRoundingDoublingMultiplyAddHigh : Saturating rounding doubling multiply-add high
+
+ /// <summary>
+ /// svint8_t svqrdmlah[_s8](svint8_t op1, svint8_t op2, svint8_t op3)
+ ///   SQRDMLAH Ztied1.B, Zop2.B, Zop3.B
+ ///   MOVPRFX Zresult, Zop1; SQRDMLAH Zresult.B, Zop2.B, Zop3.B
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_EM_3A   SQRDMLAH <Zda>.<T>, <Zn>.<T>, <Zm>.<T>
+ ///    theEmitter->emitIns_R_R_R(INS_sve_sqrdmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ ///    theEmitter->emitIns_R_R_R(INS_sve_sqrdmlah, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FK_3A   SQRDMLAH <Zda>.H, <Zn>.H, <Zm>.H[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FK_3B   SQRDMLAH <Zda>.S, <Zn>.S, <Zm>.S[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V10, REG_V11, REG_V2, 1, INS_OPTS_SCALABLE_S);
+ ///    IF_SVE_FK_3C   SQRDMLAH <Zda>.D, <Zn>.D, <Zm>.D[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<sbyte> SaturatingRoundingDoublingMultiplyAddHigh(Vector<sbyte> op1, Vector<sbyte> op2, Vector<sbyte> op3) => SaturatingRoundingDoublingMultiplyAddHigh(op1, op2, op3);
+
+ /// <summary>
+ /// svint16_t svqrdmlah[_s16](svint16_t op1, svint16_t op2, svint16_t op3)
+ ///   SQRDMLAH Ztied1.H, Zop2.H, Zop3.H
+ ///   MOVPRFX Zresult, Zop1; SQRDMLAH Zresult.H, Zop2.H, Zop3.H
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_EM_3A   SQRDMLAH <Zda>.<T>, <Zn>.<T>, <Zm>.<T>
+ ///    theEmitter->emitIns_R_R_R(INS_sve_sqrdmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ ///    theEmitter->emitIns_R_R_R(INS_sve_sqrdmlah, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FK_3A   SQRDMLAH <Zda>.H, <Zn>.H, <Zm>.H[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FK_3B   SQRDMLAH <Zda>.S, <Zn>.S, <Zm>.S[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V10, REG_V11, REG_V2, 1, INS_OPTS_SCALABLE_S);
+ ///    IF_SVE_FK_3C   SQRDMLAH <Zda>.D, <Zn>.D, <Zm>.D[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<short> SaturatingRoundingDoublingMultiplyAddHigh(Vector<short> op1, Vector<short> op2, Vector<short> op3) => SaturatingRoundingDoublingMultiplyAddHigh(op1, op2, op3);
+
+ /// <summary>
+ /// svint16_t svqrdmlah_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index)
+ ///   SQRDMLAH Ztied1.H, Zop2.H, Zop3.H[imm_index]
+ ///   MOVPRFX Zresult, Zop1; SQRDMLAH Zresult.H, Zop2.H, Zop3.H[imm_index]
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_EM_3A   SQRDMLAH <Zda>.<T>, <Zn>.<T>, <Zm>.<T>
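+ ///
+ /// A scalar sketch of what one SQRDMLAH lane computes (illustrative only, assuming
+ /// 16-bit elements; this mirrors the Arm pseudocode rather than any code in this file):
+ ///
+ ///    static short QrdmlahReference(short op1, short op2, short op3)
+ ///    {
+ ///        // (op1 << 16) + 2*op2*op3, rounded by adding 1 << 15, then the high
+ ///        // half is taken back with signed saturation.
+ ///        long acc = ((long)op1 << 16) + 2L * op2 * op3 + (1L << 15);
+ ///        return (short)Math.Clamp(acc >> 16, short.MinValue, short.MaxValue);
+ ///    }
+ ///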
+ ///    theEmitter->emitIns_R_R_R(INS_sve_sqrdmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ ///    theEmitter->emitIns_R_R_R(INS_sve_sqrdmlah, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FK_3A   SQRDMLAH <Zda>.H, <Zn>.H, <Zm>.H[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FK_3B   SQRDMLAH <Zda>.S, <Zn>.S, <Zm>.S[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V10, REG_V11, REG_V2, 1, INS_OPTS_SCALABLE_S);
+ ///    IF_SVE_FK_3C   SQRDMLAH <Zda>.D, <Zn>.D, <Zm>.D[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<short> SaturatingRoundingDoublingMultiplyAddHigh(Vector<short> op1, Vector<short> op2, Vector<short> op3, ulong imm_index) => SaturatingRoundingDoublingMultiplyAddHigh(op1, op2, op3, imm_index);
+
+ /// <summary>
+ /// svint32_t svqrdmlah[_s32](svint32_t op1, svint32_t op2, svint32_t op3)
+ ///   SQRDMLAH Ztied1.S, Zop2.S, Zop3.S
+ ///   MOVPRFX Zresult, Zop1; SQRDMLAH Zresult.S, Zop2.S, Zop3.S
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_EM_3A   SQRDMLAH <Zda>.<T>, <Zn>.<T>, <Zm>.<T>
+ ///    theEmitter->emitIns_R_R_R(INS_sve_sqrdmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ ///    theEmitter->emitIns_R_R_R(INS_sve_sqrdmlah, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FK_3A   SQRDMLAH <Zda>.H, <Zn>.H, <Zm>.H[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FK_3B   SQRDMLAH <Zda>.S, <Zn>.S, <Zm>.S[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V10, REG_V11, REG_V2, 1, INS_OPTS_SCALABLE_S);
+ ///    IF_SVE_FK_3C   SQRDMLAH <Zda>.D, <Zn>.D, <Zm>.D[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> SaturatingRoundingDoublingMultiplyAddHigh(Vector<int> op1, Vector<int> op2, Vector<int> op3) => SaturatingRoundingDoublingMultiplyAddHigh(op1, op2, op3);
+
+ /// <summary>
+ /// svint32_t svqrdmlah_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index)
+ ///   SQRDMLAH Ztied1.S, Zop2.S, Zop3.S[imm_index]
+ ///   MOVPRFX Zresult, Zop1; SQRDMLAH Zresult.S, Zop2.S, Zop3.S[imm_index]
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_EM_3A   SQRDMLAH <Zda>.<T>, <Zn>.<T>, <Zm>.<T>
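+ ///
+ /// Hypothetical use of the indexed overload (identifiers and the containing class,
+ /// assumed here to be Sve2, are placeholders):
+ ///
+ ///    // Multiply every element of 'values' by lane 3 of 'factors', then accumulate
+ ///    // into 'acc' with doubling, rounding and saturation.
+ ///    Vector<short> result = Sve2.SaturatingRoundingDoublingMultiplyAddHigh(acc, values, factors, 3);
+ ///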
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlah, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FK_3A SQRDMLAH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FK_3B SQRDMLAH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V10, REG_V11, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FK_3C SQRDMLAH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3, ulong imm_index) => SaturatingRoundingDoublingMultiplyAddHigh(op1, op2, op3, imm_index); + + /// + /// svint64_t svqrdmlah[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// SQRDMLAH Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; SQRDMLAH Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_EM_3A SQRDMLAH ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlah, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_FK_3A SQRDMLAH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_FK_3B SQRDMLAH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V10, REG_V11, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_FK_3C SQRDMLAH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyAddHigh(Vector op1, Vector op2, Vector op3) => SaturatingRoundingDoublingMultiplyAddHigh(op1, op2, op3); + + /// + /// svint64_t svqrdmlah_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index) + /// SQRDMLAH Ztied1.D, Zop2.D, Zop3.D[imm_index] + /// MOVPRFX Zresult, Zop1; SQRDMLAH Zresult.D, Zop2.D, Zop3.D[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EM_3A SQRDMLAH ., ., . 
+ ///    theEmitter->emitIns_R_R_R(INS_sve_sqrdmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ ///    theEmitter->emitIns_R_R_R(INS_sve_sqrdmlah, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FK_3A   SQRDMLAH <Zda>.H, <Zn>.H, <Zm>.H[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V0, REG_V1, REG_V1, 1, INS_OPTS_SCALABLE_H);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V2, REG_V3, REG_V3, 3, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FK_3B   SQRDMLAH <Zda>.S, <Zn>.S, <Zm>.S[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V8, REG_V9, REG_V0, 0, INS_OPTS_SCALABLE_S);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V10, REG_V11, REG_V2, 1, INS_OPTS_SCALABLE_S);
+ ///    IF_SVE_FK_3C   SQRDMLAH <Zda>.D, <Zn>.D, <Zm>.D[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V16, REG_V17, REG_V0, 0, INS_OPTS_SCALABLE_D);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlah, EA_SCALABLE, REG_V18, REG_V19, REG_V5, 1, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> SaturatingRoundingDoublingMultiplyAddHigh(Vector<long> op1, Vector<long> op2, Vector<long> op3, ulong imm_index) => SaturatingRoundingDoublingMultiplyAddHigh(op1, op2, op3, imm_index);
+
+
+ /// SaturatingRoundingDoublingMultiplyHigh : Saturating rounding doubling multiply high
+
+ /// <summary>
+ /// svint8_t svqrdmulh[_s8](svint8_t op1, svint8_t op2)
+ ///   SQRDMULH Zresult.B, Zop1.B, Zop2.B
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_BE_3A   SQRDMULH <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ ///    theEmitter->emitIns_R_R_R(INS_sve_sqrdmulh, EA_SCALABLE, REG_V23, REG_V3, REG_V31, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FI_3A   SQRDMULH <Zd>.H, <Zn>.H, <Zm>.H[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FI_3B   SQRDMULH <Zd>.S, <Zn>.S, <Zm>.S[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V12, REG_V13, REG_V4, 2, INS_OPTS_SCALABLE_S);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V14, REG_V15, REG_V6, 3, INS_OPTS_SCALABLE_S);
+ ///    IF_SVE_FI_3C   SQRDMULH <Zd>.D, <Zn>.D, <Zm>.D[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<sbyte> SaturatingRoundingDoublingMultiplyHigh(Vector<sbyte> left, Vector<sbyte> right) => SaturatingRoundingDoublingMultiplyHigh(left, right);
+
+ /// <summary>
+ /// svint16_t svqrdmulh[_s16](svint16_t op1, svint16_t op2)
+ ///   SQRDMULH Zresult.H, Zop1.H, Zop2.H
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_BE_3A   SQRDMULH <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
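+ ///
+ /// Per-lane model of SQRDMULH (illustrative, 16-bit case): the same computation as
+ /// SQRDMLAH but with no accumulator term:
+ ///
+ ///    static short QrdmulhReference(short op1, short op2)
+ ///    {
+ ///        // 2*op1*op2, rounded, high half extracted with signed saturation;
+ ///        // e.g. short.MinValue * short.MinValue saturates to short.MaxValue.
+ ///        long product = 2L * op1 * op2 + (1L << 15);
+ ///        return (short)Math.Clamp(product >> 16, short.MinValue, short.MaxValue);
+ ///    }
+ ///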
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmulh, EA_SCALABLE, REG_V23, REG_V3, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FI_3A SQRDMULH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FI_3B SQRDMULH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V12, REG_V13, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V14, REG_V15, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FI_3C SQRDMULH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector left, Vector right) => SaturatingRoundingDoublingMultiplyHigh(left, right); + + /// + /// svint16_t svqrdmulh_lane[_s16](svint16_t op1, svint16_t op2, uint64_t imm_index) + /// SQRDMULH Zresult.H, Zop1.H, Zop2.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_BE_3A SQRDMULH ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmulh, EA_SCALABLE, REG_V23, REG_V3, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FI_3A SQRDMULH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FI_3B SQRDMULH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V12, REG_V13, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V14, REG_V15, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FI_3C SQRDMULH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector op1, Vector op2, ulong imm_index) => SaturatingRoundingDoublingMultiplyHigh(op1, op2, imm_index); + + /// + /// svint32_t svqrdmulh[_s32](svint32_t op1, svint32_t op2) + /// SQRDMULH Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_BE_3A SQRDMULH ., ., . 
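+ ///
+ /// The 'R' in SQRDMULH is the rounding step: a bias of 1 << (esize - 1) is added
+ /// before the high half is extracted, so each lane rounds to nearest instead of
+ /// truncating as the non-rounding SQDMULH form does.
+ ///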
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmulh, EA_SCALABLE, REG_V23, REG_V3, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FI_3A SQRDMULH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FI_3B SQRDMULH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V12, REG_V13, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V14, REG_V15, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FI_3C SQRDMULH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector left, Vector right) => SaturatingRoundingDoublingMultiplyHigh(left, right); + + /// + /// svint32_t svqrdmulh_lane[_s32](svint32_t op1, svint32_t op2, uint64_t imm_index) + /// SQRDMULH Zresult.S, Zop1.S, Zop2.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_BE_3A SQRDMULH ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmulh, EA_SCALABLE, REG_V23, REG_V3, REG_V31, INS_OPTS_SCALABLE_H); + /// IF_SVE_FI_3A SQRDMULH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FI_3B SQRDMULH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V12, REG_V13, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V14, REG_V15, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FI_3C SQRDMULH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplyHigh(Vector op1, Vector op2, ulong imm_index) => SaturatingRoundingDoublingMultiplyHigh(op1, op2, imm_index); + + /// + /// svint64_t svqrdmulh[_s64](svint64_t op1, svint64_t op2) + /// SQRDMULH Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_BE_3A SQRDMULH ., ., . 
+ ///    theEmitter->emitIns_R_R_R(INS_sve_sqrdmulh, EA_SCALABLE, REG_V23, REG_V3, REG_V31, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FI_3A   SQRDMULH <Zd>.H, <Zn>.H, <Zm>.H[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FI_3B   SQRDMULH <Zd>.S, <Zn>.S, <Zm>.S[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V12, REG_V13, REG_V4, 2, INS_OPTS_SCALABLE_S);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V14, REG_V15, REG_V6, 3, INS_OPTS_SCALABLE_S);
+ ///    IF_SVE_FI_3C   SQRDMULH <Zd>.D, <Zn>.D, <Zm>.D[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> SaturatingRoundingDoublingMultiplyHigh(Vector<long> left, Vector<long> right) => SaturatingRoundingDoublingMultiplyHigh(left, right);
+
+ /// <summary>
+ /// svint64_t svqrdmulh_lane[_s64](svint64_t op1, svint64_t op2, uint64_t imm_index)
+ ///   SQRDMULH Zresult.D, Zop1.D, Zop2.D[imm_index]
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_BE_3A   SQRDMULH <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ ///    theEmitter->emitIns_R_R_R(INS_sve_sqrdmulh, EA_SCALABLE, REG_V23, REG_V3, REG_V31, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FI_3A   SQRDMULH <Zd>.H, <Zn>.H, <Zm>.H[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H);
+ ///    IF_SVE_FI_3B   SQRDMULH <Zd>.S, <Zn>.S, <Zm>.S[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V12, REG_V13, REG_V4, 2, INS_OPTS_SCALABLE_S);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V14, REG_V15, REG_V6, 3, INS_OPTS_SCALABLE_S);
+ ///    IF_SVE_FI_3C   SQRDMULH <Zd>.D, <Zn>.D, <Zm>.D[<imm>]
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D);
+ ///    theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmulh, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> SaturatingRoundingDoublingMultiplyHigh(Vector<long> op1, Vector<long> op2, ulong imm_index) => SaturatingRoundingDoublingMultiplyHigh(op1, op2, imm_index);
+
+
+ /// SaturatingRoundingDoublingMultiplySubtractHigh : Saturating rounding doubling multiply-subtract high
+
+ /// <summary>
+ /// svint8_t svqrdmlsh[_s8](svint8_t op1, svint8_t op2, svint8_t op3)
+ ///   SQRDMLSH Ztied1.B, Zop2.B, Zop3.B
+ ///   MOVPRFX Zresult, Zop1; SQRDMLSH Zresult.B, Zop2.B, Zop3.B
+ ///
+ /// codegenarm64test:
+ ///    IF_SVE_EM_3A   SQRDMLSH <Zda>.<T>, <Zn>.<T>, <Zm>.<T>
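+ ///
+ /// Per-lane sketch of SQRDMLSH (illustrative, 16-bit case): identical to SQRDMLAH
+ /// except the doubled product is subtracted from the accumulator:
+ ///
+ ///    static short QrdmlshReference(short op1, short op2, short op3)
+ ///    {
+ ///        long acc = ((long)op1 << 16) - 2L * op2 * op3 + (1L << 15);
+ ///        return (short)Math.Clamp(acc >> 16, short.MinValue, short.MaxValue);
+ ///    }
+ ///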
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_FK_3A SQRDMLSH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FK_3B SQRDMLSH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V12, REG_V13, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V14, REG_V15, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FK_3C SQRDMLSH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3) => SaturatingRoundingDoublingMultiplySubtractHigh(op1, op2, op3); + + /// + /// svint16_t svqrdmlsh[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// SQRDMLSH Ztied1.H, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; SQRDMLSH Zresult.H, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_EM_3A SQRDMLSH ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_FK_3A SQRDMLSH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FK_3B SQRDMLSH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V12, REG_V13, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V14, REG_V15, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FK_3C SQRDMLSH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3) => SaturatingRoundingDoublingMultiplySubtractHigh(op1, op2, op3); + + /// + /// svint16_t svqrdmlsh_lane[_s16](svint16_t op1, svint16_t op2, svint16_t op3, uint64_t imm_index) + /// SQRDMLSH Ztied1.H, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; SQRDMLSH Zresult.H, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EM_3A SQRDMLSH ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_FK_3A SQRDMLSH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FK_3B SQRDMLSH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V12, REG_V13, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V14, REG_V15, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FK_3C SQRDMLSH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3, ulong imm_index) => SaturatingRoundingDoublingMultiplySubtractHigh(op1, op2, op3, imm_index); + + /// + /// svint32_t svqrdmlsh[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// SQRDMLSH Ztied1.S, Zop2.S, Zop3.S + /// MOVPRFX Zresult, Zop1; SQRDMLSH Zresult.S, Zop2.S, Zop3.S + /// + /// codegenarm64test: + /// IF_SVE_EM_3A SQRDMLSH ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_FK_3A SQRDMLSH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FK_3B SQRDMLSH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V12, REG_V13, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V14, REG_V15, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FK_3C SQRDMLSH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3) => SaturatingRoundingDoublingMultiplySubtractHigh(op1, op2, op3); + + /// + /// svint32_t svqrdmlsh_lane[_s32](svint32_t op1, svint32_t op2, svint32_t op3, uint64_t imm_index) + /// SQRDMLSH Ztied1.S, Zop2.S, Zop3.S[imm_index] + /// MOVPRFX Zresult, Zop1; SQRDMLSH Zresult.S, Zop2.S, Zop3.S[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EM_3A SQRDMLSH ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_FK_3A SQRDMLSH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FK_3B SQRDMLSH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V12, REG_V13, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V14, REG_V15, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FK_3C SQRDMLSH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3, ulong imm_index) => SaturatingRoundingDoublingMultiplySubtractHigh(op1, op2, op3, imm_index); + + /// + /// svint64_t svqrdmlsh[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// SQRDMLSH Ztied1.D, Zop2.D, Zop3.D + /// MOVPRFX Zresult, Zop1; SQRDMLSH Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_EM_3A SQRDMLSH ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_FK_3A SQRDMLSH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FK_3B SQRDMLSH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V12, REG_V13, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V14, REG_V15, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FK_3C SQRDMLSH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3) => SaturatingRoundingDoublingMultiplySubtractHigh(op1, op2, op3); + + /// + /// svint64_t svqrdmlsh_lane[_s64](svint64_t op1, svint64_t op2, svint64_t op3, uint64_t imm_index) + /// SQRDMLSH Ztied1.D, Zop2.D, Zop3.D[imm_index] + /// MOVPRFX Zresult, Zop1; SQRDMLSH Zresult.D, Zop2.D, Zop3.D[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EM_3A SQRDMLSH ., ., . 
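+ ///
+ /// Note on the two assembly forms listed for each overload: SQRDMLAH/SQRDMLSH
+ /// overwrite their accumulator register, so when the result cannot be allocated on
+ /// top of op1 the JIT is expected to copy the accumulator first via MOVPRFX, e.g.:
+ ///
+ ///    MOVPRFX  Z0, Z1             ; Z0 = copy of the accumulator (op1)
+ ///    SQRDMLSH Z0.H, Z2.H, Z3.H   ; Z0 -= rounded doubled high product of Z2, Z3
+ ///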
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// IF_SVE_FK_3A SQRDMLSH .H, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V4, REG_V5, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V6, REG_V7, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_FK_3B SQRDMLSH .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V12, REG_V13, REG_V4, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V14, REG_V15, REG_V6, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_FK_3C SQRDMLSH .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V20, REG_V21, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sqrdmlsh, EA_SCALABLE, REG_V22, REG_V23, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SaturatingRoundingDoublingMultiplySubtractHigh(Vector op1, Vector op2, Vector op3, ulong imm_index) => SaturatingRoundingDoublingMultiplySubtractHigh(op1, op2, op3, imm_index); + + + /// Scatter16BitNarrowing : Truncate to 16 bits and store, non-temporal + + /// + /// void svstnt1h_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data) + /// STNT1H Zdata.S, Pg, [Zbases.S, XZR] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter16BitNarrowing(mask, addresses, data); + + /// + /// void svstnt1h_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// STNT1H Zdata.D, Pg, [Zbases.D, XZR] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter16BitNarrowing(mask, addresses, data); + + /// + /// void svstnt1h_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data) + /// STNT1H Zdata.S, Pg, [Zbases.S, XZR] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// 
theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter16BitNarrowing(mask, addresses, data); + + /// + /// void svstnt1h_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// STNT1H Zdata.D, Pg, [Zbases.D, XZR] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter16BitNarrowing(mask, addresses, data); + + + /// Scatter16BitWithByteOffsetsNarrowing : Truncate to 16 bits and store, non-temporal + + /// + /// void svstnt1h_scatter_[u32]offset[_s32](svbool_t pg, int16_t *base, svuint32_t offsets, svint32_t data) + /// STNT1H Zdata.S, Pg, [Zoffsets.S, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1h_scatter_[s64]offset[_s64](svbool_t pg, int16_t *base, svint64_t offsets, svint64_t data) + /// STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, 
EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1h_scatter_[u64]offset[_s64](svbool_t pg, int16_t *base, svuint64_t offsets, svint64_t data) + /// STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1h_scatter_[s64]index[_s64](svbool_t pg, int16_t *base, svint64_t indices, svint64_t data) + /// STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svstnt1h_scatter_[u64]index[_s64](svbool_t pg, int16_t *base, svuint64_t indices, svint64_t data) + /// STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, 
short* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svstnt1h_scatter_[u32]offset[_u32](svbool_t pg, uint16_t *base, svuint32_t offsets, svuint32_t data) + /// STNT1H Zdata.S, Pg, [Zoffsets.S, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1h_scatter_[s64]offset[_u64](svbool_t pg, uint16_t *base, svint64_t offsets, svuint64_t data) + /// STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1h_scatter_[u64]offset[_u64](svbool_t pg, uint16_t *base, svuint64_t offsets, svuint64_t data) + /// STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, offsets, data); + + /// + /// void svstnt1h_scatter_[s64]index[_u64](svbool_t pg, uint16_t *base, svint64_t indices, svuint64_t data) + /// STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A 
STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + /// + /// void svstnt1h_scatter_[u64]index[_u64](svbool_t pg, uint16_t *base, svuint64_t indices, svuint64_t data) + /// STNT1H Zdata.D, Pg, [Zoffsets.D, Xbase] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector indices, Vector data) => Scatter16BitWithByteOffsetsNarrowing(mask, address, indices, data); + + + /// Scatter32BitNarrowing : Truncate to 32 bits and store, non-temporal + + /// + /// void svstnt1w_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data) + /// STNT1W Zdata.D, Pg, [Zbases.D, XZR] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1W {.S }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A STNT1W {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1W {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1W {.S }, , [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data) => Scatter32BitNarrowing(mask, addresses, data); + + /// + /// void svstnt1w_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data) + /// STNT1W Zdata.D, Pg, [Zbases.D, XZR] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1W {.S }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A STNT1W {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, 
EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void Scatter32BitNarrowing(Vector<ulong> mask, Vector<ulong> addresses, Vector<ulong> data) => Scatter32BitNarrowing(mask, addresses, data);
+
+
+    /// Scatter32BitWithByteOffsetsNarrowing : Truncate to 32 bits and store, non-temporal
+
+    /// <summary>
+    /// void svstnt1w_scatter_[s64]offset[_s64](svbool_t pg, int32_t *base, svint64_t offsets, svint64_t data)
+    ///   STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A   STNT1W {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<long> mask, int* address, Vector<long> offsets, Vector<long> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1w_scatter_[u64]offset[_s64](svbool_t pg, int32_t *base, svuint64_t offsets, svint64_t data)
+    ///   STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A   STNT1W {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<long> mask, int* address, Vector<ulong> offsets, Vector<long> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1w_scatter_[s64]index[_s64](svbool_t pg, int32_t *base, svint64_t indices, svint64_t data)
+    ///   STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A   STNT1W {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<long> mask, int* address, Vector<long> indices, Vector<long> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data);
+
+    /// <summary>
+    /// void svstnt1w_scatter_[u64]index[_s64](svbool_t pg, int32_t *base, svuint64_t indices, svint64_t data)
+    ///   STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A   STNT1W {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<long> mask, int* address, Vector<ulong> indices, Vector<long> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data);
+
+    /// <summary>
+    /// void svstnt1w_scatter_[s64]offset[_u64](svbool_t pg, uint32_t *base, svint64_t offsets, svuint64_t data)
+    ///   STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A   STNT1W {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<ulong> mask, uint* address, Vector<long> offsets, Vector<ulong> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1w_scatter_[u64]offset[_u64](svbool_t pg, uint32_t *base, svuint64_t offsets, svuint64_t data)
+    ///   STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A   STNT1W {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<ulong> mask, uint* address, Vector<ulong> offsets, Vector<ulong> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1w_scatter_[s64]index[_u64](svbool_t pg, uint32_t *base, svint64_t indices, svuint64_t data)
+    ///   STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A   STNT1W {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<ulong> mask, uint* address, Vector<long> indices, Vector<ulong> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data);
+
+    /// <summary>
+    /// void svstnt1w_scatter_[u64]index[_u64](svbool_t pg, uint32_t *base, svuint64_t indices, svuint64_t data)
+    ///   STNT1W Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A   STNT1W {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector<ulong> mask, uint* address, Vector<ulong> indices, Vector<ulong> data) => Scatter32BitWithByteOffsetsNarrowing(mask, address, indices, data);
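
    // ------------------------------------------------------------------------------
    // Editor's note: an illustrative scalar model of the overloads above, not part of
    // the generated file. For each lane whose mask element is non-zero, the low 32
    // bits of data[i] are stored at (byte*)address + offsets[i]; offsets are
    // byte-granular, so the effective addresses need not be element-aligned.
    // ------------------------------------------------------------------------------
    static unsafe void Scatter32BitWithByteOffsetsNarrowingModel(
        long[] mask, int* address, long[] offsets, long[] data)
    {
        for (int i = 0; i < data.Length; i++)
        {
            if (mask[i] != 0)
            {
                // Truncate the 64-bit lane to 32 bits, then store at a byte offset.
                *(int*)((byte*)address + offsets[i]) = (int)data[i];
            }
        }
    }
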
+
+
+    /// Scatter8BitNarrowing : Truncate to 8 bits and store, non-temporal
+
+    /// <summary>
+    /// void svstnt1b_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data)
+    ///   STNT1B Zdata.S, Pg, [Zbases.S, XZR]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_IZ_4A   STNT1B {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V3, REG_R4, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1B {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_R8, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>, <Xm>]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R3, INS_OPTS_SCALABLE_B);
+    /// </summary>
+    public static unsafe void Scatter8BitNarrowing(Vector<int> mask, Vector<uint> addresses, Vector<int> data) => Scatter8BitNarrowing(mask, addresses, data);
+
+    /// <summary>
+    /// void svstnt1b_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data)
+    ///   STNT1B Zdata.D, Pg, [Zbases.D, XZR]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_IZ_4A   STNT1B {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V3, REG_R4, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1B {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_R8, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>, <Xm>]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R3, INS_OPTS_SCALABLE_B);
+    /// </summary>
+    public static unsafe void Scatter8BitNarrowing(Vector<long> mask, Vector<ulong> addresses, Vector<long> data) => Scatter8BitNarrowing(mask, addresses, data);
+
+    /// <summary>
+    /// void svstnt1b_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data)
+    ///   STNT1B Zdata.S, Pg, [Zbases.S, XZR]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_IZ_4A   STNT1B {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V3, REG_R4, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1B {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_R8, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>, <Xm>]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R3, INS_OPTS_SCALABLE_B);
+    /// </summary>
+    public static unsafe void Scatter8BitNarrowing(Vector<uint> mask, Vector<uint> addresses, Vector<uint> data) => Scatter8BitNarrowing(mask, addresses, data);
+
+    /// <summary>
+    /// void svstnt1b_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data)
+    ///   STNT1B Zdata.D, Pg, [Zbases.D, XZR]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_IZ_4A   STNT1B {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V3, REG_R4, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1B {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_R8, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>, <Xm>]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R3, INS_OPTS_SCALABLE_B);
+    /// </summary>
+    public static unsafe void Scatter8BitNarrowing(Vector<ulong> mask, Vector<ulong> addresses, Vector<ulong> data) => Scatter8BitNarrowing(mask, addresses, data);
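
    // ------------------------------------------------------------------------------
    // Editor's usage sketch (illustrative only; not part of the generated file).
    // `Sve2` as the declaring class, `Sve2.IsSupported`, and the
    // `Sve.CreateTrueMaskUInt32(SveMaskPattern.All)` helper are assumptions carried
    // over from the wider API proposal. Each active lane stores the low byte of
    // data[i] directly to the 32-bit address held in addresses[i]; no base register
    // is involved in this overload.
    // ------------------------------------------------------------------------------
    static unsafe void StoreLowBytes(Vector<uint> addresses, Vector<uint> data)
    {
        if (Sve2.IsSupported)
        {
            Vector<uint> mask = Sve.CreateTrueMaskUInt32(SveMaskPattern.All);
            Sve2.Scatter8BitNarrowing(mask, addresses, data);
        }
    }
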
+
+
+    /// Scatter8BitWithByteOffsetsNarrowing : Truncate to 8 bits and store, non-temporal
+
+    /// <summary>
+    /// void svstnt1b_scatter_[u32]offset[_s32](svbool_t pg, int8_t *base, svuint32_t offsets, svint32_t data)
+    ///   STNT1B Zdata.S, Pg, [Zoffsets.S, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_IZ_4A   STNT1B {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V3, REG_R4, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1B {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_R8, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>, <Xm>]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R3, INS_OPTS_SCALABLE_B);
+    /// </summary>
+    public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector<int> mask, sbyte* address, Vector<uint> offsets, Vector<int> data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1b_scatter_[s64]offset[_s64](svbool_t pg, int8_t *base, svint64_t offsets, svint64_t data)
+    ///   STNT1B Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_IZ_4A   STNT1B {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V3, REG_R4, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1B {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_R8, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>, <Xm>]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R3, INS_OPTS_SCALABLE_B);
+    /// </summary>
+    public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector<long> mask, sbyte* address, Vector<long> offsets, Vector<long> data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1b_scatter_[u64]offset[_s64](svbool_t pg, int8_t *base, svuint64_t offsets, svint64_t data)
+    ///   STNT1B Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_IZ_4A   STNT1B {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V3, REG_R4, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1B {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_R8, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>, <Xm>]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R3, INS_OPTS_SCALABLE_B);
+    /// </summary>
+    public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector<long> mask, sbyte* address, Vector<ulong> offsets, Vector<long> data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1b_scatter_[u32]offset[_u32](svbool_t pg, uint8_t *base, svuint32_t offsets, svuint32_t data)
+    ///   STNT1B Zdata.S, Pg, [Zoffsets.S, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_IZ_4A   STNT1B {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V3, REG_R4, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1B {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_R8, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>, <Xm>]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R3, INS_OPTS_SCALABLE_B);
+    /// </summary>
+    public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector<uint> mask, byte* address, Vector<uint> offsets, Vector<uint> data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1b_scatter_[s64]offset[_u64](svbool_t pg, uint8_t *base, svint64_t offsets, svuint64_t data)
+    ///   STNT1B Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_IZ_4A   STNT1B {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V3, REG_R4, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1B {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_R8, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>, <Xm>]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R3, INS_OPTS_SCALABLE_B);
+    /// </summary>
+    public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector<ulong> mask, byte* address, Vector<long> offsets, Vector<ulong> data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1b_scatter_[u64]offset[_u64](svbool_t pg, uint8_t *base, svuint64_t offsets, svuint64_t data)
+    ///   STNT1B Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1b, EA_SCALABLE, REG_V1, REG_P2, REG_R3, 4, INS_OPTS_SCALABLE_B);
+    ///    IF_SVE_IZ_4A   STNT1B {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P2, REG_V3, REG_R4, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1B {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_R8, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V0, REG_P4, REG_V6, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1B {<Zt>.B }, <Pg>, [<Xn|SP>, <Xm>]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1b, EA_SCALABLE, REG_V6, REG_P5, REG_R4, REG_R3, INS_OPTS_SCALABLE_B);
+    /// </summary>
+    public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector<ulong> mask, byte* address, Vector<ulong> offsets, Vector<ulong> data) => Scatter8BitWithByteOffsetsNarrowing(mask, address, offsets, data);
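
    // ------------------------------------------------------------------------------
    // Editor's note: scalar model of the byte-offset forms above (illustrative only,
    // not part of the generated file). Element i of data has its low 8 bits stored
    // at address + offsets[i] whenever the corresponding mask element is active.
    // ------------------------------------------------------------------------------
    static unsafe void Scatter8BitWithByteOffsetsNarrowingModel(
        ulong[] mask, byte* address, ulong[] offsets, ulong[] data)
    {
        for (int i = 0; i < data.Length; i++)
            if (mask[i] != 0)
                address[offsets[i]] = (byte)data[i];   // truncate the lane to its low byte
    }
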
+
+
+    /// ScatterNonTemporal : Non-truncating store, non-temporal
+
+    /// <summary>
+    /// void svstnt1_scatter[_u32base_s32](svbool_t pg, svuint32_t bases, svint32_t data)
+    ///   STNT1W Zdata.S, Pg, [Zbases.S, XZR]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A   STNT1W {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<int> mask, Vector<uint> addresses, Vector<int> data) => ScatterNonTemporal(mask, addresses, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[u32]offset[_s32](svbool_t pg, int32_t *base, svuint32_t offsets, svint32_t data)
+    ///   STNT1W Zdata.S, Pg, [Zoffsets.S, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A   STNT1W {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<int> mask, int* address, Vector<uint> offsets, Vector<int> data) => ScatterNonTemporal(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1_scatter[_u64base_s64](svbool_t pg, svuint64_t bases, svint64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zbases.D, XZR]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<long> mask, Vector<ulong> addresses, Vector<long> data) => ScatterNonTemporal(mask, addresses, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[s64]offset[_s64](svbool_t pg, int64_t *base, svint64_t offsets, svint64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<long> mask, long* address, Vector<long> offsets, Vector<long> data) => ScatterNonTemporal(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[u64]offset[_s64](svbool_t pg, int64_t *base, svuint64_t offsets, svint64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<long> mask, long* address, Vector<ulong> offsets, Vector<long> data) => ScatterNonTemporal(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[s64]index[_s64](svbool_t pg, int64_t *base, svint64_t indices, svint64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<long> mask, long* address, Vector<long> indices, Vector<long> data) => ScatterNonTemporal(mask, address, indices, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[u64]index[_s64](svbool_t pg, int64_t *base, svuint64_t indices, svint64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<long> mask, long* address, Vector<ulong> indices, Vector<long> data) => ScatterNonTemporal(mask, address, indices, data);
+
+    /// <summary>
+    /// void svstnt1_scatter[_u32base_u32](svbool_t pg, svuint32_t bases, svuint32_t data)
+    ///   STNT1W Zdata.S, Pg, [Zbases.S, XZR]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A   STNT1W {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<uint> mask, Vector<uint> addresses, Vector<uint> data) => ScatterNonTemporal(mask, addresses, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[u32]offset[_u32](svbool_t pg, uint32_t *base, svuint32_t offsets, svuint32_t data)
+    ///   STNT1W Zdata.S, Pg, [Zoffsets.S, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A   STNT1W {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<uint> mask, uint* address, Vector<uint> offsets, Vector<uint> data) => ScatterNonTemporal(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1_scatter[_u64base_u64](svbool_t pg, svuint64_t bases, svuint64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zbases.D, XZR]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<ulong> mask, Vector<ulong> addresses, Vector<ulong> data) => ScatterNonTemporal(mask, addresses, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[s64]offset[_u64](svbool_t pg, uint64_t *base, svint64_t offsets, svuint64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<ulong> mask, ulong* address, Vector<long> offsets, Vector<ulong> data) => ScatterNonTemporal(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[u64]offset[_u64](svbool_t pg, uint64_t *base, svuint64_t offsets, svuint64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<ulong> mask, ulong* address, Vector<ulong> offsets, Vector<ulong> data) => ScatterNonTemporal(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[s64]index[_u64](svbool_t pg, uint64_t *base, svint64_t indices, svuint64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<ulong> mask, ulong* address, Vector<long> indices, Vector<ulong> data) => ScatterNonTemporal(mask, address, indices, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[u64]index[_u64](svbool_t pg, uint64_t *base, svuint64_t indices, svuint64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<ulong> mask, ulong* address, Vector<ulong> indices, Vector<ulong> data) => ScatterNonTemporal(mask, address, indices, data);
+
+    /// <summary>
+    /// void svstnt1_scatter[_u32base_f32](svbool_t pg, svuint32_t bases, svfloat32_t data)
+    ///   STNT1W Zdata.S, Pg, [Zbases.S, XZR]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A   STNT1W {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<float> mask, Vector<uint> addresses, Vector<float> data) => ScatterNonTemporal(mask, addresses, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[u32]offset[_f32](svbool_t pg, float32_t *base, svuint32_t offsets, svfloat32_t data)
+    ///   STNT1W Zdata.S, Pg, [Zoffsets.S, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P0, REG_R2, -7, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A   STNT1W {<Zt>.S }, <Pg>, [<Zn>.S{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_R0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V1, REG_P3, REG_V2, REG_ZR, INS_OPTS_SCALABLE_S);
+    ///    IF_SVE_IZ_4A_A STNT1W {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V3, REG_P1, REG_V2, REG_R0, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1W {<Zt>.S }, <Pg>, [<Xn|SP>, <Xm>, LSL #2]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1w, EA_SCALABLE, REG_V0, REG_P5, REG_R6, REG_R7, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<float> mask, float* address, Vector<uint> offsets, Vector<float> data) => ScatterNonTemporal(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[s64]offset[_f64](svbool_t pg, float64_t *base, svint64_t offsets, svfloat64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<double> mask, double* address, Vector<long> offsets, Vector<double> data) => ScatterNonTemporal(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[s64]index[_f64](svbool_t pg, float64_t *base, svint64_t indices, svfloat64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<double> mask, double* address, Vector<long> indices, Vector<double> data) => ScatterNonTemporal(mask, address, indices, data);
+
+    /// <summary>
+    /// void svstnt1_scatter[_u64base_f64](svbool_t pg, svuint64_t bases, svfloat64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zbases.D, XZR]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<double> mask, Vector<ulong> addresses, Vector<double> data) => ScatterNonTemporal(mask, addresses, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[u64]offset[_f64](svbool_t pg, float64_t *base, svuint64_t offsets, svfloat64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<double> mask, double* address, Vector<ulong> offsets, Vector<double> data) => ScatterNonTemporal(mask, address, offsets, data);
+
+    /// <summary>
+    /// void svstnt1_scatter_[u64]index[_f64](svbool_t pg, float64_t *base, svuint64_t indices, svfloat64_t data)
+    ///   STNT1D Zdata.D, Pg, [Zoffsets.D, Xbase]
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_JM_3A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+    ///    theEmitter->emitIns_R_R_R_I(INS_sve_stnt1d, EA_SCALABLE, REG_V8, REG_P7, REG_R6, 5, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JA_4A   STNT1D {<Zt>.D }, <Pg>, [<Zn>.D{, <Xm>}]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V1, REG_P3, REG_V4, REG_R5, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V0, REG_P4, REG_V5, REG_ZR, INS_OPTS_SCALABLE_D);
+    ///    IF_SVE_JB_4A   STNT1D {<Zt>.D }, <Pg>, [<Xn|SP>, <Xm>, LSL #3]
+    ///    theEmitter->emitIns_R_R_R_R(INS_sve_stnt1d, EA_SCALABLE, REG_V7, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+    /// </summary>
+    public static unsafe void ScatterNonTemporal(Vector<double> mask, double* address, Vector<ulong> indices, Vector<double> data) => ScatterNonTemporal(mask, address, indices, data);
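
    // ------------------------------------------------------------------------------
    // Editor's note (illustrative only, not part of the generated file): a scalar
    // model contrasting the offset and index overloads of ScatterNonTemporal for
    // 64-bit elements. Offsets address memory in bytes, while indices are scaled by
    // the element size, mirroring the LSL #3 addressing form. The non-temporal hint
    // only affects caching behaviour; the values stored are identical to a plain
    // scatter.
    // ------------------------------------------------------------------------------
    static unsafe void ScatterNonTemporalModel(ulong[] mask, ulong* address,
                                               ulong[] offsets, ulong[] indices, ulong[] data)
    {
        for (int i = 0; i < data.Length; i++)
        {
            if (mask[i] == 0) continue;
            *(ulong*)((byte*)address + offsets[i]) = data[i];  // offset form: byte-granular
            address[indices[i]] = data[i];                     // index form: element-granular
        }
    }
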
+
+
+    /// ShiftArithmeticRounded : Rounding shift left
+
+    /// <summary>
+    /// svint8_t svrshl[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+    ///   SRSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+    ///   MOVPRFX Zresult, Zop1; SRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B
+    /// svint8_t svrshl[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+    ///   SRSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+    ///   SRSHLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+    ///   MOVPRFX Zresult, Zop1; SRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B
+    /// svint8_t svrshl[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+    ///   MOVPRFX Zresult.B, Pg/Z, Zop1.B; SRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B
+    ///   MOVPRFX Zresult.B, Pg/Z, Zop2.B; SRSHLR Zresult.B, Pg/M, Zresult.B, Zop1.B
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_EU_3A SRSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///    theEmitter->emitIns_R_R_R(INS_sve_srshl, EA_SCALABLE, REG_V8, REG_P3, REG_V27, INS_OPTS_SCALABLE_B);
+    ///
+    /// Embedded arg1 mask predicate
+    /// </summary>
+    public static unsafe Vector<sbyte> ShiftArithmeticRounded(Vector<sbyte> value, Vector<sbyte> count) => ShiftArithmeticRounded(value, count);
+
+    /// <summary>
+    /// svint16_t svrshl[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+    ///   SRSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+    ///   MOVPRFX Zresult, Zop1; SRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H
+    /// svint16_t svrshl[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+    ///   SRSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+    ///   SRSHLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+    ///   MOVPRFX Zresult, Zop1; SRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H
+    /// svint16_t svrshl[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+    ///   MOVPRFX Zresult.H, Pg/Z, Zop1.H; SRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H
+    ///   MOVPRFX Zresult.H, Pg/Z, Zop2.H; SRSHLR Zresult.H, Pg/M, Zresult.H, Zop1.H
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_EU_3A SRSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///    theEmitter->emitIns_R_R_R(INS_sve_srshl, EA_SCALABLE, REG_V8, REG_P3, REG_V27, INS_OPTS_SCALABLE_B);
+    ///
+    /// Embedded arg1 mask predicate
+    /// </summary>
+    public static unsafe Vector<short> ShiftArithmeticRounded(Vector<short> value, Vector<short> count) => ShiftArithmeticRounded(value, count);
+
+    /// <summary>
+    /// svint32_t svrshl[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+    ///   SRSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+    ///   MOVPRFX Zresult, Zop1; SRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S
+    /// svint32_t svrshl[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+    ///   SRSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+    ///   SRSHLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+    ///   MOVPRFX Zresult, Zop1; SRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S
+    /// svint32_t svrshl[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+    ///   MOVPRFX Zresult.S, Pg/Z, Zop1.S; SRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S
+    ///   MOVPRFX Zresult.S, Pg/Z, Zop2.S; SRSHLR Zresult.S, Pg/M, Zresult.S, Zop1.S
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_EU_3A SRSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///    theEmitter->emitIns_R_R_R(INS_sve_srshl, EA_SCALABLE, REG_V8, REG_P3, REG_V27, INS_OPTS_SCALABLE_B);
+    ///
+    /// Embedded arg1 mask predicate
+    /// </summary>
+    public static unsafe Vector<int> ShiftArithmeticRounded(Vector<int> value, Vector<int> count) => ShiftArithmeticRounded(value, count);
+
+    /// <summary>
+    /// svint64_t svrshl[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+    ///   SRSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+    ///   MOVPRFX Zresult, Zop1; SRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D
+    /// svint64_t svrshl[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+    ///   SRSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+    ///   SRSHLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+    ///   MOVPRFX Zresult, Zop1; SRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D
+    /// svint64_t svrshl[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+    ///   MOVPRFX Zresult.D, Pg/Z, Zop1.D; SRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D
+    ///   MOVPRFX Zresult.D, Pg/Z, Zop2.D; SRSHLR Zresult.D, Pg/M, Zresult.D, Zop1.D
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_EU_3A SRSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///    theEmitter->emitIns_R_R_R(INS_sve_srshl, EA_SCALABLE, REG_V8, REG_P3, REG_V27, INS_OPTS_SCALABLE_B);
+    ///
+    /// Embedded arg1 mask predicate
+    /// </summary>
+    public static unsafe Vector<long> ShiftArithmeticRounded(Vector<long> value, Vector<long> count) => ShiftArithmeticRounded(value, count);
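
    // ------------------------------------------------------------------------------
    // Editor's note: per-lane scalar model of the rounding shift above (illustrative
    // only, not part of the generated file). SRSHL shifts left when count is
    // positive; a negative count performs an arithmetic right shift by -count with
    // rounding, i.e. the value is biased by 1 << (n - 1) before the shift. Shift
    // amounts at or beyond the lane width are not modelled here for brevity.
    // ------------------------------------------------------------------------------
    static long ShiftArithmeticRoundedModel(long value, long count)
    {
        if (count >= 0)
            return value << (int)count;          // plain left shift
        int n = (int)-count;
        return (value + (1L << (n - 1))) >> n;   // rounding arithmetic right shift
    }
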
+
+
+    /// ShiftArithmeticRoundedSaturate : Saturating rounding shift left
+
+    /// <summary>
+    /// svint8_t svqrshl[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+    ///   SQRSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+    ///   MOVPRFX Zresult, Zop1; SQRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B
+    /// svint8_t svqrshl[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+    ///   SQRSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+    ///   SQRSHLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+    ///   MOVPRFX Zresult, Zop1; SQRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B
+    /// svint8_t svqrshl[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+    ///   MOVPRFX Zresult.B, Pg/Z, Zop1.B; SQRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B
+    ///   MOVPRFX Zresult.B, Pg/Z, Zop2.B; SQRSHLR Zresult.B, Pg/M, Zresult.B, Zop1.B
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_EU_3A SQRSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///    theEmitter->emitIns_R_R_R(INS_sve_sqrshl, EA_SCALABLE, REG_V4, REG_P7, REG_V31, INS_OPTS_SCALABLE_B);
+    ///
+    /// Embedded arg1 mask predicate
+    /// </summary>
+    public static unsafe Vector<sbyte> ShiftArithmeticRoundedSaturate(Vector<sbyte> value, Vector<sbyte> count) => ShiftArithmeticRoundedSaturate(value, count);
+
+    /// <summary>
+    /// svint16_t svqrshl[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+    ///   SQRSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+    ///   MOVPRFX Zresult, Zop1; SQRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H
+    /// svint16_t svqrshl[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+    ///   SQRSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+    ///   SQRSHLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+    ///   MOVPRFX Zresult, Zop1; SQRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H
+    /// svint16_t svqrshl[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+    ///   MOVPRFX Zresult.H, Pg/Z, Zop1.H; SQRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H
+    ///   MOVPRFX Zresult.H, Pg/Z, Zop2.H; SQRSHLR Zresult.H, Pg/M, Zresult.H, Zop1.H
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_EU_3A SQRSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///    theEmitter->emitIns_R_R_R(INS_sve_sqrshl, EA_SCALABLE, REG_V4, REG_P7, REG_V31, INS_OPTS_SCALABLE_B);
+    ///
+    /// Embedded arg1 mask predicate
+    /// </summary>
+    public static unsafe Vector<short> ShiftArithmeticRoundedSaturate(Vector<short> value, Vector<short> count) => ShiftArithmeticRoundedSaturate(value, count);
+
+    /// <summary>
+    /// svint32_t svqrshl[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+    ///   SQRSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+    ///   MOVPRFX Zresult, Zop1; SQRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S
+    /// svint32_t svqrshl[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+    ///   SQRSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+    ///   SQRSHLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+    ///   MOVPRFX Zresult, Zop1; SQRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S
+    /// svint32_t svqrshl[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+    ///   MOVPRFX Zresult.S, Pg/Z, Zop1.S; SQRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S
+    ///   MOVPRFX Zresult.S, Pg/Z, Zop2.S; SQRSHLR Zresult.S, Pg/M, Zresult.S, Zop1.S
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_EU_3A SQRSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///    theEmitter->emitIns_R_R_R(INS_sve_sqrshl, EA_SCALABLE, REG_V4, REG_P7, REG_V31, INS_OPTS_SCALABLE_B);
+    ///
+    /// Embedded arg1 mask predicate
+    /// </summary>
+    public static unsafe Vector<int> ShiftArithmeticRoundedSaturate(Vector<int> value, Vector<int> count) => ShiftArithmeticRoundedSaturate(value, count);
+
+    /// <summary>
+    /// svint64_t svqrshl[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+    ///   SQRSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+    ///   MOVPRFX Zresult, Zop1; SQRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D
+    /// svint64_t svqrshl[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+    ///   SQRSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+    ///   SQRSHLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+    ///   MOVPRFX Zresult, Zop1; SQRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D
+    /// svint64_t svqrshl[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+    ///   MOVPRFX Zresult.D, Pg/Z, Zop1.D; SQRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D
+    ///   MOVPRFX Zresult.D, Pg/Z, Zop2.D; SQRSHLR Zresult.D, Pg/M, Zresult.D, Zop1.D
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_EU_3A SQRSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///    theEmitter->emitIns_R_R_R(INS_sve_sqrshl, EA_SCALABLE, REG_V4, REG_P7, REG_V31, INS_OPTS_SCALABLE_B);
+    ///
+    /// Embedded arg1 mask predicate
+    /// </summary>
+    public static unsafe Vector<long> ShiftArithmeticRoundedSaturate(Vector<long> value, Vector<long> count) => ShiftArithmeticRoundedSaturate(value, count);
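
    // ------------------------------------------------------------------------------
    // Editor's note: scalar model of the saturating, rounding form for 8-bit lanes
    // (illustrative only; very wide shift counts are clamped rather than modelled
    // exactly). Left shifts that overflow the signed 8-bit range saturate to
    // sbyte.MinValue/MaxValue; negative counts round like SRSHL before narrowing
    // back to 8 bits.
    // ------------------------------------------------------------------------------
    static sbyte ShiftArithmeticRoundedSaturateModel(sbyte value, sbyte count)
    {
        if (count >= 0)
        {
            long shifted = (long)value << Math.Min((int)count, 15);
            return (sbyte)Math.Clamp(shifted, sbyte.MinValue, sbyte.MaxValue);
        }
        int n = Math.Min(-count, 8);
        return (sbyte)((value + (1 << (n - 1))) >> n);
    }
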
+
+
+    /// ShiftArithmeticSaturate : Saturating shift left
+
+    /// <summary>
+    /// svint8_t svqshl[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+    ///   SQSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+    ///   MOVPRFX Zresult, Zop1; SQSHL Zresult.B, Pg/M, Zresult.B, Zop2.B
+    /// svint8_t svqshl[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+    ///   SQSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+    ///   SQSHLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+    ///   MOVPRFX Zresult, Zop1; SQSHL Zresult.B, Pg/M, Zresult.B, Zop2.B
+    /// svint8_t svqshl[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+    ///   MOVPRFX Zresult.B, Pg/Z, Zop1.B; SQSHL Zresult.B, Pg/M, Zresult.B, Zop2.B
+    ///   MOVPRFX Zresult.B, Pg/Z, Zop2.B; SQSHLR Zresult.B, Pg/M, Zresult.B, Zop1.B
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_AM_2A : left shifts SQSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V7, REG_P3, 0, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V9, REG_P4, 7, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V11, REG_P0, 9, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V13, REG_P7, 15, INS_OPTS_SCALABLE_H);
+    ///    IF_SVE_EU_3A SQSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///    theEmitter->emitIns_R_R_R(INS_sve_sqshl, EA_SCALABLE, REG_V6, REG_P1, REG_V29, INS_OPTS_SCALABLE_S);
+    ///
+    /// Embedded arg1 mask predicate
+    /// </summary>
+    public static unsafe Vector<sbyte> ShiftArithmeticSaturate(Vector<sbyte> value, Vector<sbyte> count) => ShiftArithmeticSaturate(value, count);
+
+    /// <summary>
+    /// svint16_t svqshl[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+    ///   SQSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+    ///   MOVPRFX Zresult, Zop1; SQSHL Zresult.H, Pg/M, Zresult.H, Zop2.H
+    /// svint16_t svqshl[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+    ///   SQSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+    ///   SQSHLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+    ///   MOVPRFX Zresult, Zop1; SQSHL Zresult.H, Pg/M, Zresult.H, Zop2.H
+    /// svint16_t svqshl[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+    ///   MOVPRFX Zresult.H, Pg/Z, Zop1.H; SQSHL Zresult.H, Pg/M, Zresult.H, Zop2.H
+    ///   MOVPRFX Zresult.H, Pg/Z, Zop2.H; SQSHLR Zresult.H, Pg/M, Zresult.H, Zop1.H
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_AM_2A : left shifts SQSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V7, REG_P3, 0, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V9, REG_P4, 7, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V11, REG_P0, 9, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V13, REG_P7, 15, INS_OPTS_SCALABLE_H);
+    ///    IF_SVE_EU_3A SQSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///    theEmitter->emitIns_R_R_R(INS_sve_sqshl, EA_SCALABLE, REG_V6, REG_P1, REG_V29, INS_OPTS_SCALABLE_S);
+    ///
+    /// Embedded arg1 mask predicate
+    /// </summary>
+    public static unsafe Vector<short> ShiftArithmeticSaturate(Vector<short> value, Vector<short> count) => ShiftArithmeticSaturate(value, count);
+
+    /// <summary>
+    /// svint32_t svqshl[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+    ///   SQSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+    ///   MOVPRFX Zresult, Zop1; SQSHL Zresult.S, Pg/M, Zresult.S, Zop2.S
+    /// svint32_t svqshl[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+    ///   SQSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+    ///   SQSHLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+    ///   MOVPRFX Zresult, Zop1; SQSHL Zresult.S, Pg/M, Zresult.S, Zop2.S
+    /// svint32_t svqshl[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+    ///   MOVPRFX Zresult.S, Pg/Z, Zop1.S; SQSHL Zresult.S, Pg/M, Zresult.S, Zop2.S
+    ///   MOVPRFX Zresult.S, Pg/Z, Zop2.S; SQSHLR Zresult.S, Pg/M, Zresult.S, Zop1.S
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_AM_2A : left shifts SQSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V7, REG_P3, 0, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V9, REG_P4, 7, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V11, REG_P0, 9, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V13, REG_P7, 15, INS_OPTS_SCALABLE_H);
+    ///    IF_SVE_EU_3A SQSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///    theEmitter->emitIns_R_R_R(INS_sve_sqshl, EA_SCALABLE, REG_V6, REG_P1, REG_V29, INS_OPTS_SCALABLE_S);
+    ///
+    /// Embedded arg1 mask predicate
+    /// </summary>
+    public static unsafe Vector<int> ShiftArithmeticSaturate(Vector<int> value, Vector<int> count) => ShiftArithmeticSaturate(value, count);
+
+    /// <summary>
+    /// svint64_t svqshl[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+    ///   SQSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+    ///   MOVPRFX Zresult, Zop1; SQSHL Zresult.D, Pg/M, Zresult.D, Zop2.D
+    /// svint64_t svqshl[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+    ///   SQSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+    ///   SQSHLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+    ///   MOVPRFX Zresult, Zop1; SQSHL Zresult.D, Pg/M, Zresult.D, Zop2.D
+    /// svint64_t svqshl[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+    ///   MOVPRFX Zresult.D, Pg/Z, Zop1.D; SQSHL Zresult.D, Pg/M, Zresult.D, Zop2.D
+    ///   MOVPRFX Zresult.D, Pg/Z, Zop2.D; SQSHLR Zresult.D, Pg/M, Zresult.D, Zop1.D
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_AM_2A : left shifts SQSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V7, REG_P3, 0, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V9, REG_P4, 7, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V11, REG_P0, 9, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sqshl, EA_SCALABLE, REG_V13, REG_P7, 15, INS_OPTS_SCALABLE_H);
+    ///    IF_SVE_EU_3A SQSHL <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, <Zm>.<T>
+    ///    theEmitter->emitIns_R_R_R(INS_sve_sqshl, EA_SCALABLE, REG_V6, REG_P1, REG_V29, INS_OPTS_SCALABLE_S);
+    ///
+    /// Embedded arg1 mask predicate
+    /// </summary>
+    public static unsafe Vector<long> ShiftArithmeticSaturate(Vector<long> value, Vector<long> count) => ShiftArithmeticSaturate(value, count);
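
    // ------------------------------------------------------------------------------
    // Editor's usage sketch (illustrative only; `Sve2` as the declaring class is an
    // assumption carried over from the wider proposal). With value = 1 << 28 and
    // count = 8 in every lane, an ordinary shift would produce 1 << 36, which cannot
    // be represented in 32 bits, so every lane saturates to int.MaxValue instead of
    // wrapping.
    // ------------------------------------------------------------------------------
    static Vector<int> SaturatingShiftExample()
    {
        Vector<int> values = new Vector<int>(1 << 28);
        Vector<int> counts = new Vector<int>(8);
        return Sve2.ShiftArithmeticSaturate(values, counts);
    }
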
+
+
+    /// ShiftLeftAndInsert : Shift left and insert
+
+    /// <summary>
+    /// svint8_t svsli[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3)
+    ///   SLI Ztied1.B, Zop2.B, #imm3
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FT_2A SLI <Zd>.<T>, <Zn>.<T>, #<const>
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+    /// </summary>
+    public static unsafe Vector<sbyte> ShiftLeftAndInsert(Vector<sbyte> left, Vector<sbyte> right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift);
+
+    /// <summary>
+    /// svint16_t svsli[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3)
+    ///   SLI Ztied1.H, Zop2.H, #imm3
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FT_2A SLI <Zd>.<T>, <Zn>.<T>, #<const>
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+    /// </summary>
+    public static unsafe Vector<short> ShiftLeftAndInsert(Vector<short> left, Vector<short> right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift);
+
+    /// <summary>
+    /// svint32_t svsli[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3)
+    ///   SLI Ztied1.S, Zop2.S, #imm3
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FT_2A SLI <Zd>.<T>, <Zn>.<T>, #<const>
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+    /// </summary>
+    public static unsafe Vector<int> ShiftLeftAndInsert(Vector<int> left, Vector<int> right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift);
+
+    /// <summary>
+    /// svint64_t svsli[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3)
+    ///   SLI Ztied1.D, Zop2.D, #imm3
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FT_2A SLI <Zd>.<T>, <Zn>.<T>, #<const>
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+    /// </summary>
+    public static unsafe Vector<long> ShiftLeftAndInsert(Vector<long> left, Vector<long> right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift);
+
+    /// <summary>
+    /// svuint8_t svsli[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3)
+    ///   SLI Ztied1.B, Zop2.B, #imm3
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FT_2A SLI <Zd>.<T>, <Zn>.<T>, #<const>
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+    /// </summary>
+    public static unsafe Vector<byte> ShiftLeftAndInsert(Vector<byte> left, Vector<byte> right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift);
+
+    /// <summary>
+    /// svuint16_t svsli[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3)
+    ///   SLI Ztied1.H, Zop2.H, #imm3
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FT_2A SLI <Zd>.<T>, <Zn>.<T>, #<const>
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+    /// </summary>
+    public static unsafe Vector<ushort> ShiftLeftAndInsert(Vector<ushort> left, Vector<ushort> right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift);
+
+    /// <summary>
+    /// svuint32_t svsli[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3)
+    ///   SLI Ztied1.S, Zop2.S, #imm3
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FT_2A SLI <Zd>.<T>, <Zn>.<T>, #<const>
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+    /// </summary>
+    public static unsafe Vector<uint> ShiftLeftAndInsert(Vector<uint> left, Vector<uint> right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift);
+
+    /// <summary>
+    /// svuint64_t svsli[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3)
+    ///   SLI Ztied1.D, Zop2.D, #imm3
+    ///
+    /// codegenarm64test:
+    ///    IF_SVE_FT_2A SLI <Zd>.<T>, <Zn>.<T>, #<const>
+    ///    theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_B);
+    ///
theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 15, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V31, REG_V31, 63, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sli, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftLeftAndInsert(left, right, shift); + + + /// ShiftLeftLogicalSaturate : Saturating shift left + + /// + /// svuint8_t svqshl[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) + /// UQSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; UQSHL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svqshl[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) + /// UQSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// UQSHLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; UQSHL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svqshl[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; UQSHL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; UQSHLR Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts UQSHL ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V17, REG_P7, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V18, REG_P0, 18, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V19, REG_P3, 32, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V0, REG_P2, 63, INS_OPTS_SCALABLE_D); + /// IF_SVE_EU_3A UQSHL ., /M, ., . 
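+ // Usage sketch for the ShiftLeftAndInsert overloads above (illustrative; the helper
+ // is hypothetical, element types per the ACLE prototypes, svuint8_t -> Vector<byte>).
+ // SLI shifts each 'right' lane left by the constant and inserts it into 'left',
+ // preserving the low 'shift' bits of each 'left' lane; 'shift' must be a constant
+ // in [0, elementBits - 1]:
+ //     private static Vector<byte> ShiftLeftAndInsertSketch(Vector<byte> left, Vector<byte> right)
+ //         => ShiftLeftAndInsert(left, right, 3);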
+ /// theEmitter->emitIns_R_R_R(INS_sve_uqshl, EA_SCALABLE, REG_V12, REG_P7, REG_V23, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count) => ShiftLeftLogicalSaturate(value, count); + + /// + /// svuint16_t svqshl[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2) + /// UQSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; UQSHL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svqshl[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) + /// UQSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// UQSHLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; UQSHL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svqshl[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; UQSHL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; UQSHLR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts UQSHL ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V17, REG_P7, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V18, REG_P0, 18, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V19, REG_P3, 32, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V0, REG_P2, 63, INS_OPTS_SCALABLE_D); + /// IF_SVE_EU_3A UQSHL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqshl, EA_SCALABLE, REG_V12, REG_P7, REG_V23, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count) => ShiftLeftLogicalSaturate(value, count); + + /// + /// svuint32_t svqshl[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) + /// UQSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UQSHL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svqshl[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) + /// UQSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// UQSHLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; UQSHL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svqshl[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; UQSHL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; UQSHLR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts UQSHL ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V17, REG_P7, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V18, REG_P0, 18, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V19, REG_P3, 32, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V0, REG_P2, 63, INS_OPTS_SCALABLE_D); + /// IF_SVE_EU_3A UQSHL ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uqshl, EA_SCALABLE, REG_V12, REG_P7, REG_V23, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count) => ShiftLeftLogicalSaturate(value, count); + + /// + /// svuint64_t svqshl[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) + /// UQSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UQSHL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svqshl[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) + /// UQSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// UQSHLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; UQSHL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svqshl[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; UQSHL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; UQSHLR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts UQSHL ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V17, REG_P7, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V18, REG_P0, 18, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V19, REG_P3, 32, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_uqshl, EA_SCALABLE, REG_V0, REG_P2, 63, INS_OPTS_SCALABLE_D); + /// IF_SVE_EU_3A UQSHL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqshl, EA_SCALABLE, REG_V12, REG_P7, REG_V23, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count) => ShiftLeftLogicalSaturate(value, count); + + + /// ShiftLeftLogicalSaturateUnsigned : Saturating shift left unsigned + + /// + /// svuint8_t svqshlu[_n_s8]_m(svbool_t pg, svint8_t op1, uint64_t imm2) + /// SQSHLU Ztied1.B, Pg/M, Ztied1.B, #imm2 + /// MOVPRFX Zresult, Zop1; SQSHLU Zresult.B, Pg/M, Zresult.B, #imm2 + /// svuint8_t svqshlu[_n_s8]_x(svbool_t pg, svint8_t op1, uint64_t imm2) + /// SQSHLU Ztied1.B, Pg/M, Ztied1.B, #imm2 + /// MOVPRFX Zresult, Zop1; SQSHLU Zresult.B, Pg/M, Zresult.B, #imm2 + /// svuint8_t svqshlu[_n_s8]_z(svbool_t pg, svint8_t op1, uint64_t imm2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; SQSHLU Zresult.B, Pg/M, Zresult.B, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts SQSHLU ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V14, REG_P6, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V15, REG_P0, 12, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V16, REG_P1, 15, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V0, REG_P2, 31, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); + + /// + /// svuint16_t svqshlu[_n_s16]_m(svbool_t pg, svint16_t op1, uint64_t imm2) + /// SQSHLU Ztied1.H, Pg/M, Ztied1.H, #imm2 + /// MOVPRFX Zresult, Zop1; SQSHLU Zresult.H, Pg/M, Zresult.H, #imm2 + /// svuint16_t svqshlu[_n_s16]_x(svbool_t pg, svint16_t op1, uint64_t imm2) + /// SQSHLU Ztied1.H, Pg/M, Ztied1.H, #imm2 + /// MOVPRFX Zresult, Zop1; SQSHLU Zresult.H, Pg/M, Zresult.H, #imm2 + /// svuint16_t svqshlu[_n_s16]_z(svbool_t pg, svint16_t op1, uint64_t 
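+ // Usage sketch for the ShiftLeftLogicalSaturate overloads above (illustrative; the
+ // helper is hypothetical). Note the ACLE prototypes pair an unsigned 'value' with a
+ // signed 'count' (svuint8_t/svint8_t -> Vector<byte>/Vector<sbyte>): negative counts
+ // shift right, and left shifts clamp to the unsigned element range:
+ //     private static Vector<byte> SaturatingShiftLeftSketch(Vector<byte> value, Vector<sbyte> count)
+ //         => ShiftLeftLogicalSaturate(value, count);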
imm2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; SQSHLU Zresult.H, Pg/M, Zresult.H, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts SQSHLU ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V14, REG_P6, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V15, REG_P0, 12, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V16, REG_P1, 15, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V0, REG_P2, 31, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); + + /// + /// svuint32_t svqshlu[_n_s32]_m(svbool_t pg, svint32_t op1, uint64_t imm2) + /// SQSHLU Ztied1.S, Pg/M, Ztied1.S, #imm2 + /// MOVPRFX Zresult, Zop1; SQSHLU Zresult.S, Pg/M, Zresult.S, #imm2 + /// svuint32_t svqshlu[_n_s32]_x(svbool_t pg, svint32_t op1, uint64_t imm2) + /// SQSHLU Ztied1.S, Pg/M, Ztied1.S, #imm2 + /// MOVPRFX Zresult, Zop1; SQSHLU Zresult.S, Pg/M, Zresult.S, #imm2 + /// svuint32_t svqshlu[_n_s32]_z(svbool_t pg, svint32_t op1, uint64_t imm2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; SQSHLU Zresult.S, Pg/M, Zresult.S, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts SQSHLU ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V14, REG_P6, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V15, REG_P0, 12, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V16, REG_P1, 15, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V0, REG_P2, 31, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); + + /// + /// svuint64_t svqshlu[_n_s64]_m(svbool_t pg, svint64_t op1, uint64_t imm2) + /// SQSHLU Ztied1.D, Pg/M, Ztied1.D, #imm2 + /// MOVPRFX Zresult, Zop1; SQSHLU Zresult.D, Pg/M, Zresult.D, #imm2 + /// svuint64_t svqshlu[_n_s64]_x(svbool_t pg, svint64_t op1, uint64_t imm2) + /// SQSHLU Ztied1.D, Pg/M, Ztied1.D, #imm2 + /// MOVPRFX Zresult, Zop1; SQSHLU Zresult.D, Pg/M, Zresult.D, #imm2 + /// svuint64_t svqshlu[_n_s64]_z(svbool_t pg, svint64_t op1, uint64_t imm2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SQSHLU Zresult.D, Pg/M, Zresult.D, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_AM_2A : left shifts SQSHLU ., /M, ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V14, REG_P6, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V15, REG_P0, 12, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V16, REG_P1, 15, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sqshlu, EA_SCALABLE, REG_V0, REG_P2, 31, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); + + + /// ShiftLeftLogicalWideningEven : Shift left long (bottom) + + /// + /// svint16_t svshllb[_n_s16](svint8_t op1, uint64_t imm2) + /// SSHLLB Zresult.H, Zop1.B, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_FR_2A SSHLLB ., ., # + /// 
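+ // Usage sketch for the ShiftLeftLogicalSaturateUnsigned overloads above
+ // (illustrative; the helper is hypothetical). SQSHLU takes a signed input and
+ // produces an unsigned result (svint8_t -> svuint8_t, i.e. Vector<sbyte> ->
+ // Vector<byte>): negative lanes clamp to 0, overflowing lanes clamp to the
+ // unsigned maximum:
+ //     private static Vector<byte> SaturatingUnsignedShiftSketch(Vector<sbyte> value)
+ //         => ShiftLeftLogicalSaturateUnsigned(value, 2);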
theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V8, REG_V9, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V16, REG_V17, 8, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningEven(value, count); + + /// + /// svint32_t svshllb[_n_s32](svint16_t op1, uint64_t imm2) + /// SSHLLB Zresult.S, Zop1.H, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_FR_2A SSHLLB ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V8, REG_V9, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V16, REG_V17, 8, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningEven(value, count); + + /// + /// svint64_t svshllb[_n_s64](svint32_t op1, uint64_t imm2) + /// SSHLLB Zresult.D, Zop1.S, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_FR_2A SSHLLB ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V8, REG_V9, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sshllb, EA_SCALABLE, REG_V16, REG_V17, 8, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningEven(value, count); + + /// + /// svuint16_t svshllb[_n_u16](svuint8_t op1, uint64_t imm2) + /// USHLLB Zresult.H, Zop1.B, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_FR_2A USHLLB ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V4, REG_V5, 5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V12, REG_V13, 10, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V20, REG_V21, 24, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningEven(value, count); + + /// + /// svuint32_t svshllb[_n_u32](svuint16_t op1, uint64_t imm2) + /// USHLLB Zresult.S, Zop1.H, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_FR_2A USHLLB ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V4, REG_V5, 5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V12, REG_V13, 10, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V20, REG_V21, 24, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningEven(value, count); + + /// + /// svuint64_t svshllb[_n_u64](svuint32_t op1, uint64_t imm2) + /// USHLLB Zresult.D, Zop1.S, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_FR_2A USHLLB ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V4, REG_V5, 5, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V12, REG_V13, 10, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_ushllb, EA_SCALABLE, REG_V20, REG_V21, 24, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector 
ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningEven(value, count); + + + /// ShiftLeftLogicalWideningOdd : Shift left long (top) + + /// + /// svint16_t svshllt[_n_s16](svint8_t op1, uint64_t imm2) + /// SSHLLT Zresult.H, Zop1.B, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_FR_2A SSHLLT ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V2, REG_V3, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V10, REG_V11, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V18, REG_V19, 16, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningOdd(value, count); + + /// + /// svint32_t svshllt[_n_s32](svint16_t op1, uint64_t imm2) + /// SSHLLT Zresult.S, Zop1.H, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_FR_2A SSHLLT ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V2, REG_V3, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V10, REG_V11, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V18, REG_V19, 16, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningOdd(value, count); + + /// + /// svint64_t svshllt[_n_s64](svint32_t op1, uint64_t imm2) + /// SSHLLT Zresult.D, Zop1.S, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_FR_2A SSHLLT ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V2, REG_V3, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V10, REG_V11, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sshllt, EA_SCALABLE, REG_V18, REG_V19, 16, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningOdd(value, count); + + /// + /// svuint16_t svshllt[_n_u16](svuint8_t op1, uint64_t imm2) + /// USHLLT Zresult.H, Zop1.B, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_FR_2A USHLLT ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V6, REG_V7, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V14, REG_V15, 15, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V22, REG_V23, 31, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningOdd(value, count); + + /// + /// svuint32_t svshllt[_n_u32](svuint16_t op1, uint64_t imm2) + /// USHLLT Zresult.S, Zop1.H, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_FR_2A USHLLT ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V6, REG_V7, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V14, REG_V15, 15, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V22, REG_V23, 31, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningOdd(value, count); + + /// + /// svuint64_t svshllt[_n_u64](svuint32_t op1, uint64_t imm2) + /// USHLLT Zresult.D, Zop1.S, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_FR_2A USHLLT ., ., # + 
/// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V6, REG_V7, 7, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V14, REG_V15, 15, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_ushllt, EA_SCALABLE, REG_V22, REG_V23, 31, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count) => ShiftLeftLogicalWideningOdd(value, count); + + + /// ShiftLogicalRounded : Rounding shift left + + /// + /// svuint8_t svrshl[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) + /// URSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; URSHL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svrshl[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) + /// URSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// URSHLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; URSHL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svrshl[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; URSHL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; URSHLR Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_EU_3A URSHL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_urshl, EA_SCALABLE, REG_V14, REG_P1, REG_V21, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count) => ShiftLogicalRounded(value, count); + + /// + /// svuint16_t svrshl[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2) + /// URSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; URSHL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svrshl[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) + /// URSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// URSHLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; URSHL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svrshl[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; URSHL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; URSHLR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_EU_3A URSHL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_urshl, EA_SCALABLE, REG_V14, REG_P1, REG_V21, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count) => ShiftLogicalRounded(value, count); + + /// + /// svuint32_t svrshl[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) + /// URSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; URSHL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svrshl[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) + /// URSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// URSHLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; URSHL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svrshl[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; URSHL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; URSHLR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_EU_3A URSHL ., /M, ., . 
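+ // Usage sketch for the ShiftLeftLogicalWideningEven/Odd pairs above (illustrative;
+ // the helper is hypothetical). SSHLLB/SSHLLT widen the even- and odd-numbered lanes
+ // respectively while shifting (svint8_t -> svint16_t, i.e. Vector<sbyte> ->
+ // Vector<short>), so processing a full vector takes one call to each:
+ //     private static (Vector<short> Even, Vector<short> Odd) WideningShiftSketch(Vector<sbyte> value)
+ //         => (ShiftLeftLogicalWideningEven(value, 4), ShiftLeftLogicalWideningOdd(value, 4));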
+ /// theEmitter->emitIns_R_R_R(INS_sve_urshl, EA_SCALABLE, REG_V14, REG_P1, REG_V21, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count) => ShiftLogicalRounded(value, count); + + /// + /// svuint64_t svrshl[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) + /// URSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; URSHL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svrshl[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) + /// URSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// URSHLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; URSHL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svrshl[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; URSHL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; URSHLR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_EU_3A URSHL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_urshl, EA_SCALABLE, REG_V14, REG_P1, REG_V21, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count) => ShiftLogicalRounded(value, count); + + + /// ShiftLogicalRoundedSaturate : Saturating rounding shift left + + /// + /// svuint8_t svqrshl[_u8]_m(svbool_t pg, svuint8_t op1, svint8_t op2) + /// UQRSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; UQRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svqrshl[_u8]_x(svbool_t pg, svuint8_t op1, svint8_t op2) + /// UQRSHL Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// UQRSHLR Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// MOVPRFX Zresult, Zop1; UQRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svqrshl[_u8]_z(svbool_t pg, svuint8_t op1, svint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; UQRSHL Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; UQRSHLR Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_EU_3A UQRSHL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqrshl, EA_SCALABLE, REG_V10, REG_P5, REG_V25, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count) => ShiftLogicalRoundedSaturate(value, count); + + /// + /// svuint16_t svqrshl[_u16]_m(svbool_t pg, svuint16_t op1, svint16_t op2) + /// UQRSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; UQRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svqrshl[_u16]_x(svbool_t pg, svuint16_t op1, svint16_t op2) + /// UQRSHL Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// UQRSHLR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; UQRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svqrshl[_u16]_z(svbool_t pg, svuint16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; UQRSHL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; UQRSHLR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_EU_3A UQRSHL ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uqrshl, EA_SCALABLE, REG_V10, REG_P5, REG_V25, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count) => ShiftLogicalRoundedSaturate(value, count); + + /// + /// svuint32_t svqrshl[_u32]_m(svbool_t pg, svuint32_t op1, svint32_t op2) + /// UQRSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UQRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svqrshl[_u32]_x(svbool_t pg, svuint32_t op1, svint32_t op2) + /// UQRSHL Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// UQRSHLR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// MOVPRFX Zresult, Zop1; UQRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svqrshl[_u32]_z(svbool_t pg, svuint32_t op1, svint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; UQRSHL Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; UQRSHLR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_EU_3A UQRSHL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqrshl, EA_SCALABLE, REG_V10, REG_P5, REG_V25, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count) => ShiftLogicalRoundedSaturate(value, count); + + /// + /// svuint64_t svqrshl[_u64]_m(svbool_t pg, svuint64_t op1, svint64_t op2) + /// UQRSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UQRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svqrshl[_u64]_x(svbool_t pg, svuint64_t op1, svint64_t op2) + /// UQRSHL Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// UQRSHLR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// MOVPRFX Zresult, Zop1; UQRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svqrshl[_u64]_z(svbool_t pg, svuint64_t op1, svint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; UQRSHL Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; UQRSHLR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_EU_3A UQRSHL ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uqrshl, EA_SCALABLE, REG_V10, REG_P5, REG_V25, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count) => ShiftLogicalRoundedSaturate(value, count); + + + /// ShiftRightAndInsert : Shift right and insert + + /// + /// svint8_t svsri[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// SRI Ztied1.B, Zop2.B, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_FT_2A SRI ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + /// + /// svint16_t svsri[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) + /// SRI Ztied1.H, Zop2.H, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_FT_2A SRI ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 17, 
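+ // Usage sketch covering ShiftLogicalRounded and ShiftLogicalRoundedSaturate above
+ // (illustrative; the helper is hypothetical, element types per the ACLE prototypes).
+ // Both shift each unsigned 'value' lane by the signed 'count' lane; negative counts
+ // shift right with rounding, and the Saturate variant additionally clamps left
+ // shifts to the unsigned element range:
+ //     private static Vector<uint> RoundedShiftSketch(Vector<uint> value, Vector<int> count)
+ //         => ShiftLogicalRoundedSaturate(value, count);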
INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + /// + /// svint32_t svsri[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) + /// SRI Ztied1.S, Zop2.S, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_FT_2A SRI ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + /// + /// svint64_t svsri[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) + /// SRI Ztied1.D, Zop2.D, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_FT_2A SRI ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, 
EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + /// + /// svuint8_t svsri[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) + /// SRI Ztied1.B, Zop2.B, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_FT_2A SRI ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + /// + /// svuint16_t svsri[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) + /// SRI Ztied1.H, Zop2.H, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_FT_2A SRI ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B); + /// 
theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + /// + /// svuint32_t svsri[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) + /// SRI Ztied1.S, Zop2.S, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_FT_2A SRI ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + /// + /// svuint64_t svsri[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) + /// SRI Ztied1.D, Zop2.D, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_FT_2A SRI ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, 
REG_V0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_sri, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift) => ShiftRightAndInsert(left, right, shift); + + + /// ShiftRightArithmeticAdd : Shift right and accumulate + + /// + /// svint8_t svsra[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// SSRA Ztied1.B, Zop2.B, #imm3 + /// MOVPRFX Zresult, Zop1; SSRA Zresult.B, Zop2.B, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_FU_2A SSRA ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D); + /// 
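+ // Usage sketch for the ShiftRightAndInsert overloads above (illustrative; the helper
+ // is hypothetical). SRI is the right-shift counterpart of SLI: each 'right' lane is
+ // shifted right by the constant and inserted into 'left', preserving the high
+ // 'shift' bits of each 'left' lane; 'shift' must be a constant in [1, elementBits]:
+ //     private static Vector<uint> ShiftRightAndInsertSketch(Vector<uint> left, Vector<uint> right)
+ //         => ShiftRightAndInsert(left, right, 8);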
theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<sbyte> ShiftRightArithmeticAdd(Vector<sbyte> addend, Vector<sbyte> value, [ConstantExpected] byte count) => ShiftRightArithmeticAdd(addend, value, count);
+
+ /// <summary>
+ /// svint16_t svsra[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3)
+ /// SSRA Ztied1.H, Zop2.H, #imm3
+ /// MOVPRFX Zresult, Zop1; SSRA Zresult.H, Zop2.H, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A SSRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<short> ShiftRightArithmeticAdd(Vector<short> addend, Vector<short> value, [ConstantExpected] byte count) => ShiftRightArithmeticAdd(addend, value, count);
+
+ /// <summary>
+ /// svint32_t svsra[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3)
+ /// SSRA Ztied1.S, Zop2.S, #imm3
+ /// MOVPRFX Zresult, Zop1; SSRA Zresult.S, Zop2.S, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A SSRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> ShiftRightArithmeticAdd(Vector<int> addend, Vector<int> value, [ConstantExpected] byte count) => ShiftRightArithmeticAdd(addend, value, count);
+
+ /// <summary>
+ /// svint64_t svsra[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3)
+ /// SSRA Ztied1.D, Zop2.D, #imm3
+ /// MOVPRFX Zresult, Zop1; SSRA Zresult.D, Zop2.D, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A SSRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ssra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> ShiftRightArithmeticAdd(Vector<long> addend, Vector<long> value, [ConstantExpected] byte count) => ShiftRightArithmeticAdd(addend, value, count);
+
+
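+ // Illustrative sketch (not generated output): per element, ShiftRightArithmeticAdd
+ // computes addend[i] + (value[i] >> count) with an arithmetic (sign-propagating)
+ // shift, matching SSRA. A minimal use, assuming these surface on the proposed
+ // Sve2 class and that 'acc'/'samples' are hypothetical locals:
+ //
+ //     if (Sve2.IsSupported)
+ //     {
+ //         acc = Sve2.ShiftRightArithmeticAdd(acc, samples, 4);  // acc[i] += samples[i] >> 4
+ //     }
+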
+ /// ShiftRightArithmeticNarrowingSaturateEven : Saturating shift right narrow (bottom)
+
+ /// <summary>
+ /// svint8_t svqshrnb[_n_s16](svint16_t op1, uint64_t imm2)
+ /// SQSHRNB Zresult.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQSHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqshrnb, EA_SCALABLE, REG_V16, REG_V17, 8, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<sbyte> ShiftRightArithmeticNarrowingSaturateEven(Vector<short> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateEven(value, count);
+
+ /// <summary>
+ /// svint16_t svqshrnb[_n_s32](svint32_t op1, uint64_t imm2)
+ /// SQSHRNB Zresult.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQSHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqshrnb, EA_SCALABLE, REG_V16, REG_V17, 8, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<short> ShiftRightArithmeticNarrowingSaturateEven(Vector<int> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateEven(value, count);
+
+ /// <summary>
+ /// svint32_t svqshrnb[_n_s64](svint64_t op1, uint64_t imm2)
+ /// SQSHRNB Zresult.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQSHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqshrnb, EA_SCALABLE, REG_V16, REG_V17, 8, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<int> ShiftRightArithmeticNarrowingSaturateEven(Vector<long> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateEven(value, count);
+
+ /// <summary>
+ /// svuint8_t svqshrnb[_n_u16](svuint16_t op1, uint64_t imm2)
+ /// UQSHRNB Zresult.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A UQSHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_uqshrnb, EA_SCALABLE, REG_V28, REG_V29, 32, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<byte> ShiftRightArithmeticNarrowingSaturateEven(Vector<ushort> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateEven(value, count);
+
+ /// <summary>
+ /// svuint16_t svqshrnb[_n_u32](svuint32_t op1, uint64_t imm2)
+ /// UQSHRNB Zresult.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A UQSHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_uqshrnb, EA_SCALABLE, REG_V28, REG_V29, 32, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<ushort> ShiftRightArithmeticNarrowingSaturateEven(Vector<uint> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateEven(value, count);
+
+ /// <summary>
+ /// svuint32_t svqshrnb[_n_u64](svuint64_t op1, uint64_t imm2)
+ /// UQSHRNB Zresult.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A UQSHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_uqshrnb, EA_SCALABLE, REG_V28, REG_V29, 32, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<uint> ShiftRightArithmeticNarrowingSaturateEven(Vector<ulong> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateEven(value, count);
+
+
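+ // Illustrative sketch (not generated output): the "Even" (bottom, SQSHRNB/UQSHRNB)
+ // forms shift each wide element right, saturate it to the half-width type, and
+ // write it to the even-numbered lanes of the result, zeroing the odd lanes; the
+ // "Odd" (top) forms below fill the odd lanes instead. A hypothetical use,
+ // assuming the proposed Sve2 class and a 'wide' local:
+ //
+ //     // int lanes -> short lanes: result[2*i] = SaturateToInt16(wide[i] >> 8)
+ //     Vector<short> narrowed = Sve2.ShiftRightArithmeticNarrowingSaturateEven(wide, 8);
+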
+ /// ShiftRightArithmeticNarrowingSaturateOdd : Saturating shift right narrow (top)
+
+ /// <summary>
+ /// svint8_t svqshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2)
+ /// SQSHRNT Ztied.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQSHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqshrnt, EA_SCALABLE, REG_V18, REG_V19, 6, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<sbyte> ShiftRightArithmeticNarrowingSaturateOdd(Vector<sbyte> even, Vector<short> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateOdd(even, value, count);
+
+ /// <summary>
+ /// svint16_t svqshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2)
+ /// SQSHRNT Ztied.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQSHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqshrnt, EA_SCALABLE, REG_V18, REG_V19, 6, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<short> ShiftRightArithmeticNarrowingSaturateOdd(Vector<short> even, Vector<int> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateOdd(even, value, count);
+
+ /// <summary>
+ /// svint32_t svqshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2)
+ /// SQSHRNT Ztied.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQSHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqshrnt, EA_SCALABLE, REG_V18, REG_V19, 6, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<int> ShiftRightArithmeticNarrowingSaturateOdd(Vector<int> even, Vector<long> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateOdd(even, value, count);
+
+ /// <summary>
+ /// svuint8_t svqshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2)
+ /// UQSHRNT Ztied.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A UQSHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_uqshrnt, EA_SCALABLE, REG_V30, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<byte> ShiftRightArithmeticNarrowingSaturateOdd(Vector<byte> even, Vector<ushort> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateOdd(even, value, count);
+
+ /// <summary>
+ /// svuint16_t svqshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2)
+ /// UQSHRNT Ztied.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A UQSHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_uqshrnt, EA_SCALABLE, REG_V30, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<ushort> ShiftRightArithmeticNarrowingSaturateOdd(Vector<ushort> even, Vector<uint> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateOdd(even, value, count);
+
+ /// <summary>
+ /// svuint32_t svqshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2)
+ /// UQSHRNT Ztied.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A UQSHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_uqshrnt, EA_SCALABLE, REG_V30, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<uint> ShiftRightArithmeticNarrowingSaturateOdd(Vector<uint> even, Vector<ulong> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateOdd(even, value, count);
+
+
+ /// ShiftRightArithmeticNarrowingSaturateUnsignedEven : Saturating shift right unsigned narrow (bottom)
+
+ /// <summary>
+ /// svuint8_t svqshrunb[_n_s16](svint16_t op1, uint64_t imm2)
+ /// SQSHRUNB Zresult.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQSHRUNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqshrunb, EA_SCALABLE, REG_V20, REG_V21, 13, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<byte> ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector<short> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedEven(value, count);
+
+ /// <summary>
+ /// svuint16_t svqshrunb[_n_s32](svint32_t op1, uint64_t imm2)
+ /// SQSHRUNB Zresult.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQSHRUNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqshrunb, EA_SCALABLE, REG_V20, REG_V21, 13, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<ushort> ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector<int> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedEven(value, count);
+
+ /// <summary>
+ /// svuint32_t svqshrunb[_n_s64](svint64_t op1, uint64_t imm2)
+ /// SQSHRUNB Zresult.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQSHRUNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqshrunb, EA_SCALABLE, REG_V20, REG_V21, 13, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<uint> ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector<long> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedEven(value, count);
+
+
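+ // Illustrative sketch (not generated output): the Unsigned variants (SQSHRUNB/
+ // SQSHRUNT) take signed input but saturate to the unsigned half-width range, so
+ // negative elements clamp to zero and large values clamp to the type maximum.
+ // Hypothetical use, assuming the proposed Sve2 class and a 'coeffs' local:
+ //
+ //     // short lanes -> byte lanes: negatives become 0, oversized results clamp to 255
+ //     Vector<byte> pixels = Sve2.ShiftRightArithmeticNarrowingSaturateUnsignedEven(coeffs, 6);
+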
+ /// ShiftRightArithmeticNarrowingSaturateUnsignedOdd : Saturating shift right unsigned narrow (top)
+
+ /// <summary>
+ /// svuint8_t svqshrunt[_n_s16](svuint8_t even, svint16_t op1, uint64_t imm2)
+ /// SQSHRUNT Ztied.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQSHRUNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqshrunt, EA_SCALABLE, REG_V22, REG_V23, 16, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<byte> ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector<byte> even, Vector<short> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedOdd(even, value, count);
+
+ /// <summary>
+ /// svuint16_t svqshrunt[_n_s32](svuint16_t even, svint32_t op1, uint64_t imm2)
+ /// SQSHRUNT Ztied.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQSHRUNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqshrunt, EA_SCALABLE, REG_V22, REG_V23, 16, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<ushort> ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector<ushort> even, Vector<int> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedOdd(even, value, count);
+
+ /// <summary>
+ /// svuint32_t svqshrunt[_n_s64](svuint32_t even, svint64_t op1, uint64_t imm2)
+ /// SQSHRUNT Ztied.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQSHRUNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqshrunt, EA_SCALABLE, REG_V22, REG_V23, 16, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<uint> ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector<uint> even, Vector<long> value, [ConstantExpected] byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedOdd(even, value, count);
+
+
+ /// ShiftRightArithmeticRounded : Rounding shift right
+
+ /// <summary>
+ /// svint8_t svrshr[_n_s8]_m(svbool_t pg, svint8_t op1, uint64_t imm2)
+ /// SRSHR Ztied1.B, Pg/M, Ztied1.B, #imm2
+ /// MOVPRFX Zresult, Zop1; SRSHR Zresult.B, Pg/M, Zresult.B, #imm2
+ /// svint8_t svrshr[_n_s8]_x(svbool_t pg, svint8_t op1, uint64_t imm2)
+ /// SRSHR Ztied1.B, Pg/M, Ztied1.B, #imm2
+ /// MOVPRFX Zresult, Zop1; SRSHR Zresult.B, Pg/M, Zresult.B, #imm2
+ /// svint8_t svrshr[_n_s8]_z(svbool_t pg, svint8_t op1, uint64_t imm2)
+ /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; SRSHR Zresult.B, Pg/M, Zresult.B, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts SRSHR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V6, REG_P3, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V7, REG_P4, 3, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V21, REG_P5, 21, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V22, REG_P6, 63, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<sbyte> ShiftRightArithmeticRounded(Vector<sbyte> value, [ConstantExpected] byte count) => ShiftRightArithmeticRounded(value, count);
+
+ /// <summary>
+ /// svint16_t svrshr[_n_s16]_m(svbool_t pg, svint16_t op1, uint64_t imm2)
+ /// SRSHR Ztied1.H, Pg/M, Ztied1.H, #imm2
+ /// MOVPRFX Zresult, Zop1; SRSHR Zresult.H, Pg/M, Zresult.H, #imm2
+ /// svint16_t svrshr[_n_s16]_x(svbool_t pg, svint16_t op1, uint64_t imm2)
+ /// SRSHR Ztied1.H, Pg/M, Ztied1.H, #imm2
+ /// MOVPRFX Zresult, Zop1; SRSHR Zresult.H, Pg/M, Zresult.H, #imm2
+ /// svint16_t svrshr[_n_s16]_z(svbool_t pg, svint16_t op1, uint64_t imm2)
+ /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; SRSHR Zresult.H, Pg/M, Zresult.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts SRSHR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V6, REG_P3, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V7, REG_P4, 3, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V21, REG_P5, 21, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V22, REG_P6, 63, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<short> ShiftRightArithmeticRounded(Vector<short> value, [ConstantExpected] byte count) => ShiftRightArithmeticRounded(value, count);
+
+ /// <summary>
+ /// svint32_t svrshr[_n_s32]_m(svbool_t pg, svint32_t op1, uint64_t imm2)
+ /// SRSHR Ztied1.S, Pg/M, Ztied1.S, #imm2
+ /// MOVPRFX Zresult, Zop1; SRSHR Zresult.S, Pg/M, Zresult.S, #imm2
+ /// svint32_t svrshr[_n_s32]_x(svbool_t pg, svint32_t op1, uint64_t imm2)
+ /// SRSHR Ztied1.S, Pg/M, Ztied1.S, #imm2
+ /// MOVPRFX Zresult, Zop1; SRSHR Zresult.S, Pg/M, Zresult.S, #imm2
+ /// svint32_t svrshr[_n_s32]_z(svbool_t pg, svint32_t op1, uint64_t imm2)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; SRSHR Zresult.S, Pg/M, Zresult.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts SRSHR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V6, REG_P3, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V7, REG_P4, 3, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V21, REG_P5, 21, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V22, REG_P6, 63, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<int> ShiftRightArithmeticRounded(Vector<int> value, [ConstantExpected] byte count) => ShiftRightArithmeticRounded(value, count);
+
+ /// <summary>
+ /// svint64_t svrshr[_n_s64]_m(svbool_t pg, svint64_t op1, uint64_t imm2)
+ /// SRSHR Ztied1.D, Pg/M, Ztied1.D, #imm2
+ /// MOVPRFX Zresult, Zop1; SRSHR Zresult.D, Pg/M, Zresult.D, #imm2
+ /// svint64_t svrshr[_n_s64]_x(svbool_t pg, svint64_t op1, uint64_t imm2)
+ /// SRSHR Ztied1.D, Pg/M, Ztied1.D, #imm2
+ /// MOVPRFX Zresult, Zop1; SRSHR Zresult.D, Pg/M, Zresult.D, #imm2
+ /// svint64_t svrshr[_n_s64]_z(svbool_t pg, svint64_t op1, uint64_t imm2)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SRSHR Zresult.D, Pg/M, Zresult.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts SRSHR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V6, REG_P3, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V7, REG_P4, 3, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V21, REG_P5, 21, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srshr, EA_SCALABLE, REG_V22, REG_P6, 63, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<long> ShiftRightArithmeticRounded(Vector<long> value, [ConstantExpected] byte count) => ShiftRightArithmeticRounded(value, count);
+
+
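+ // Illustrative sketch (not generated output): the Rounded forms add the rounding
+ // constant 1 << (count - 1) before shifting, i.e. per element
+ // (value[i] + (1 << (count - 1))) >> count, giving round-to-nearest rather than
+ // truncation. Hypothetical use, assuming the proposed Sve2 class:
+ //
+ //     Vector<int> scaled = Sve2.ShiftRightArithmeticRounded(values, 4);  // ~= round(values[i] / 16)
+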
+ /// ShiftRightArithmeticRoundedAdd : Rounding shift right and accumulate
+
+ /// <summary>
+ /// svint8_t svrsra[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3)
+ /// SRSRA Ztied1.B, Zop2.B, #imm3
+ /// MOVPRFX Zresult, Zop1; SRSRA Zresult.B, Zop2.B, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A SRSRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<sbyte> ShiftRightArithmeticRoundedAdd(Vector<sbyte> addend, Vector<sbyte> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count);
+
+ /// <summary>
+ /// svint16_t svrsra[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3)
+ /// SRSRA Ztied1.H, Zop2.H, #imm3
+ /// MOVPRFX Zresult, Zop1; SRSRA Zresult.H, Zop2.H, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A SRSRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<short> ShiftRightArithmeticRoundedAdd(Vector<short> addend, Vector<short> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count);
+
+ /// <summary>
+ /// svint32_t svrsra[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3)
+ /// SRSRA Ztied1.S, Zop2.S, #imm3
+ /// MOVPRFX Zresult, Zop1; SRSRA Zresult.S, Zop2.S, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A SRSRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<int> ShiftRightArithmeticRoundedAdd(Vector<int> addend, Vector<int> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count);
+
+ /// <summary>
+ /// svint64_t svrsra[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3)
+ /// SRSRA Ztied1.D, Zop2.D, #imm3
+ /// MOVPRFX Zresult, Zop1; SRSRA Zresult.D, Zop2.D, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A SRSRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_srsra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<long> ShiftRightArithmeticRoundedAdd(Vector<long> addend, Vector<long> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count);
+
+
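+ // Illustrative sketch (not generated output): ShiftRightArithmeticRoundedAdd
+ // combines the rounded shift with accumulation, per element:
+ // addend[i] + ((value[i] + (1 << (count - 1))) >> count), matching SRSRA.
+ // Hypothetical use, assuming the proposed Sve2 class:
+ //
+ //     acc = Sve2.ShiftRightArithmeticRoundedAdd(acc, delta, 3);  // acc[i] += round(delta[i] / 8)
+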
+ /// ShiftRightArithmeticRoundedNarrowingSaturateEven : Saturating rounding shift right narrow (bottom)
+
+ /// <summary>
+ /// svint8_t svqrshrnb[_n_s16](svint16_t op1, uint64_t imm2)
+ /// SQRSHRNB Zresult.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQRSHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqrshrnb, EA_SCALABLE, REG_V8, REG_V9, 3, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<sbyte> ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector<short> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateEven(value, count);
+
+ /// <summary>
+ /// svint16_t svqrshrnb[_n_s32](svint32_t op1, uint64_t imm2)
+ /// SQRSHRNB Zresult.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQRSHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqrshrnb, EA_SCALABLE, REG_V8, REG_V9, 3, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<short> ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector<int> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateEven(value, count);
+
+ /// <summary>
+ /// svint32_t svqrshrnb[_n_s64](svint64_t op1, uint64_t imm2)
+ /// SQRSHRNB Zresult.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQRSHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqrshrnb, EA_SCALABLE, REG_V8, REG_V9, 3, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<int> ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector<long> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateEven(value, count);
+
+
+ /// ShiftRightArithmeticRoundedNarrowingSaturateOdd : Saturating rounding shift right narrow (top)
+
+ /// <summary>
+ /// svint8_t svqrshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2)
+ /// SQRSHRNT Ztied.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQRSHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqrshrnt, EA_SCALABLE, REG_V10, REG_V11, 4, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<sbyte> ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector<sbyte> even, Vector<short> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateOdd(even, value, count);
+
+ /// <summary>
+ /// svint16_t svqrshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2)
+ /// SQRSHRNT Ztied.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQRSHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqrshrnt, EA_SCALABLE, REG_V10, REG_V11, 4, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<short> ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector<short> even, Vector<int> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateOdd(even, value, count);
+
+ /// <summary>
+ /// svint32_t svqrshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2)
+ /// SQRSHRNT Ztied.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQRSHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqrshrnt, EA_SCALABLE, REG_V10, REG_V11, 4, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<int> ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector<int> even, Vector<long> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateOdd(even, value, count);
+
+
+ /// ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven : Saturating rounding shift right unsigned narrow (bottom)
+
+ /// <summary>
+ /// svuint8_t svqrshrunb[_n_s16](svint16_t op1, uint64_t imm2)
+ /// SQRSHRUNB Zresult.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQRSHRUNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqrshrunb, EA_SCALABLE, REG_V12, REG_V13, 5, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<byte> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector<short> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(value, count);
+
+ /// <summary>
+ /// svuint16_t svqrshrunb[_n_s32](svint32_t op1, uint64_t imm2)
+ /// SQRSHRUNB Zresult.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQRSHRUNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqrshrunb, EA_SCALABLE, REG_V12, REG_V13, 5, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<ushort> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector<int> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(value, count);
+
+ /// <summary>
+ /// svuint32_t svqrshrunb[_n_s64](svint64_t op1, uint64_t imm2)
+ /// SQRSHRUNB Zresult.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQRSHRUNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqrshrunb, EA_SCALABLE, REG_V12, REG_V13, 5, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<uint> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector<long> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(value, count);
+
+
+ /// ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd : Saturating rounding shift right unsigned narrow (top)
+
+ /// <summary>
+ /// svuint8_t svqrshrunt[_n_s16](svuint8_t even, svint16_t op1, uint64_t imm2)
+ /// SQRSHRUNT Ztied.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQRSHRUNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqrshrunt, EA_SCALABLE, REG_V14, REG_V15, 8, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<byte> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector<byte> even, Vector<short> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(even, value, count);
+
+ /// <summary>
+ /// svuint16_t svqrshrunt[_n_s32](svuint16_t even, svint32_t op1, uint64_t imm2)
+ /// SQRSHRUNT Ztied.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQRSHRUNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqrshrunt, EA_SCALABLE, REG_V14, REG_V15, 8, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<ushort> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector<ushort> even, Vector<int> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(even, value, count);
+
+ /// <summary>
+ /// svuint32_t svqrshrunt[_n_s64](svuint32_t even, svint64_t op1, uint64_t imm2)
+ /// SQRSHRUNT Ztied.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SQRSHRUNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_sqrshrunt, EA_SCALABLE, REG_V14, REG_V15, 8, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<uint> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector<uint> even, Vector<long> value, [ConstantExpected] byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(even, value, count);
+
+
+ /// ShiftRightLogicalAdd : Shift right and accumulate
+
+ /// <summary>
+ /// svuint8_t svsra[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3)
+ /// USRA Ztied1.B, Zop2.B, #imm3
+ /// MOVPRFX Zresult, Zop1; USRA Zresult.B, Zop2.B, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A USRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<byte> ShiftRightLogicalAdd(Vector<byte> addend, Vector<byte> value, [ConstantExpected] byte count) => ShiftRightLogicalAdd(addend, value, count);
+
+ /// <summary>
+ /// svuint16_t svsra[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3)
+ /// USRA Ztied1.H, Zop2.H, #imm3
+ /// MOVPRFX Zresult, Zop1; USRA Zresult.H, Zop2.H, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A USRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ushort> ShiftRightLogicalAdd(Vector<ushort> addend, Vector<ushort> value, [ConstantExpected] byte count) => ShiftRightLogicalAdd(addend, value, count);
+
+ /// <summary>
+ /// svuint32_t svsra[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3)
+ /// USRA Ztied1.S, Zop2.S, #imm3
+ /// MOVPRFX Zresult, Zop1; USRA Zresult.S, Zop2.S, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A USRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<uint> ShiftRightLogicalAdd(Vector<uint> addend, Vector<uint> value, [ConstantExpected] byte count) => ShiftRightLogicalAdd(addend, value, count);
+
+ /// <summary>
+ /// svuint64_t svsra[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3)
+ /// USRA Ztied1.D, Zop2.D, #imm3
+ /// MOVPRFX Zresult, Zop1; USRA Zresult.D, Zop2.D, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A USRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_usra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> ShiftRightLogicalAdd(Vector<ulong> addend, Vector<ulong> value, [ConstantExpected] byte count) => ShiftRightLogicalAdd(addend, value, count);
+
+
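+ // Illustrative sketch (not generated output): the Logical forms use a zero-filling
+ // shift, so ShiftRightLogicalAdd computes addend[i] + (value[i] >>> count) on
+ // unsigned lanes (USRA), whereas the Arithmetic forms above propagate the sign bit.
+ // Hypothetical use, assuming the proposed Sve2 class:
+ //
+ //     hist = Sve2.ShiftRightLogicalAdd(hist, counts, 1);  // hist[i] += counts[i] / 2
+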
+ /// ShiftRightLogicalNarrowingEven : Shift right narrow (bottom)
+
+ /// <summary>
+ /// svint8_t svshrnb[_n_s16](svint16_t op1, uint64_t imm2)
+ /// SHRNB Zresult.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_shrnb, EA_SCALABLE, REG_V4, REG_V5, 1, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<sbyte> ShiftRightLogicalNarrowingEven(Vector<short> value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingEven(value, count);
+
+ /// <summary>
+ /// svint16_t svshrnb[_n_s32](svint32_t op1, uint64_t imm2)
+ /// SHRNB Zresult.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_shrnb, EA_SCALABLE, REG_V4, REG_V5, 1, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<short> ShiftRightLogicalNarrowingEven(Vector<int> value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingEven(value, count);
+
+ /// <summary>
+ /// svint32_t svshrnb[_n_s64](svint64_t op1, uint64_t imm2)
+ /// SHRNB Zresult.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_shrnb, EA_SCALABLE, REG_V4, REG_V5, 1, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<int> ShiftRightLogicalNarrowingEven(Vector<long> value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingEven(value, count);
+
+ /// <summary>
+ /// svuint8_t svshrnb[_n_u16](svuint16_t op1, uint64_t imm2)
+ /// SHRNB Zresult.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_shrnb, EA_SCALABLE, REG_V4, REG_V5, 1, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<byte> ShiftRightLogicalNarrowingEven(Vector<ushort> value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingEven(value, count);
+
+ /// <summary>
+ /// svuint16_t svshrnb[_n_u32](svuint32_t op1, uint64_t imm2)
+ /// SHRNB Zresult.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_shrnb, EA_SCALABLE, REG_V4, REG_V5, 1, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<ushort> ShiftRightLogicalNarrowingEven(Vector<uint> value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingEven(value, count);
+
+ /// <summary>
+ /// svuint32_t svshrnb[_n_u64](svuint64_t op1, uint64_t imm2)
+ /// SHRNB Zresult.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SHRNB <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_shrnb, EA_SCALABLE, REG_V4, REG_V5, 1, INS_OPTS_SCALABLE_S);
+ /// </summary>
+ public static unsafe Vector<uint> ShiftRightLogicalNarrowingEven(Vector<ulong> value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingEven(value, count);
+
+
+ /// ShiftRightLogicalNarrowingOdd : Shift right narrow (top)
+
+ /// <summary>
+ /// svint8_t svshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2)
+ /// SHRNT Ztied.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_shrnt, EA_SCALABLE, REG_V6, REG_V7, 2, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<sbyte> ShiftRightLogicalNarrowingOdd(Vector<sbyte> even, Vector<short> value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingOdd(even, value, count);
+
+ /// <summary>
+ /// svint16_t svshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2)
+ /// SHRNT Ztied.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_shrnt, EA_SCALABLE, REG_V6, REG_V7, 2, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<short> ShiftRightLogicalNarrowingOdd(Vector<short> even, Vector<int> value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingOdd(even, value, count);
+
+ /// <summary>
+ /// svint32_t svshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2)
+ /// SHRNT Ztied.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_shrnt, EA_SCALABLE, REG_V6, REG_V7, 2, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<int> ShiftRightLogicalNarrowingOdd(Vector<int> even, Vector<long> value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingOdd(even, value, count);
+
+ /// <summary>
+ /// svuint8_t svshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2)
+ /// SHRNT Ztied.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_shrnt, EA_SCALABLE, REG_V6, REG_V7, 2, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<byte> ShiftRightLogicalNarrowingOdd(Vector<byte> even, Vector<ushort> value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingOdd(even, value, count);
+
+ /// <summary>
+ /// svuint16_t svshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2)
+ /// SHRNT Ztied.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_shrnt, EA_SCALABLE, REG_V6, REG_V7, 2, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<ushort> ShiftRightLogicalNarrowingOdd(Vector<ushort> even, Vector<uint> value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingOdd(even, value, count);
+
+ /// <summary>
+ /// svuint32_t svshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2)
+ /// SHRNT Ztied.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A SHRNT <Zd>.<T>, <Zn>.<Tb>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_shrnt, EA_SCALABLE, REG_V6, REG_V7, 2, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<uint> ShiftRightLogicalNarrowingOdd(Vector<uint> even, Vector<ulong> value, [ConstantExpected] byte count) => ShiftRightLogicalNarrowingOdd(even, value, count);
+
+
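+ // Illustrative sketch (not generated output): the Even/Odd pair interleaves. The
+ // "bottom" form writes even result lanes (odd lanes zeroed) and the "top" form
+ // fills the odd lanes of an existing vector, so two wide vectors narrow into one.
+ // Assuming the proposed Sve2 class; 'lo' and 'hi' are hypothetical locals:
+ //
+ //     Vector<short> n = Sve2.ShiftRightLogicalNarrowingEven(lo, 8);  // even lanes from lo
+ //     n = Sve2.ShiftRightLogicalNarrowingOdd(n, hi, 8);              // odd lanes from hi
+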
+ /// ShiftRightLogicalRounded : Rounding shift right
+
+ /// <summary>
+ /// svuint8_t svrshr[_n_u8]_m(svbool_t pg, svuint8_t op1, uint64_t imm2)
+ /// URSHR Ztied1.B, Pg/M, Ztied1.B, #imm2
+ /// MOVPRFX Zresult, Zop1; URSHR Zresult.B, Pg/M, Zresult.B, #imm2
+ /// svuint8_t svrshr[_n_u8]_x(svbool_t pg, svuint8_t op1, uint64_t imm2)
+ /// URSHR Ztied1.B, Pg/M, Ztied1.B, #imm2
+ /// MOVPRFX Zresult, Zop1; URSHR Zresult.B, Pg/M, Zresult.B, #imm2
+ /// svuint8_t svrshr[_n_u8]_z(svbool_t pg, svuint8_t op1, uint64_t imm2)
+ /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; URSHR Zresult.B, Pg/M, Zresult.B, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts URSHR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_urshr, EA_SCALABLE, REG_V31, REG_P7, 64, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<byte> ShiftRightLogicalRounded(Vector<byte> value, [ConstantExpected] byte count) => ShiftRightLogicalRounded(value, count);
+
+ /// <summary>
+ /// svuint16_t svrshr[_n_u16]_m(svbool_t pg, svuint16_t op1, uint64_t imm2)
+ /// URSHR Ztied1.H, Pg/M, Ztied1.H, #imm2
+ /// MOVPRFX Zresult, Zop1; URSHR Zresult.H, Pg/M, Zresult.H, #imm2
+ /// svuint16_t svrshr[_n_u16]_x(svbool_t pg, svuint16_t op1, uint64_t imm2)
+ /// URSHR Ztied1.H, Pg/M, Ztied1.H, #imm2
+ /// MOVPRFX Zresult, Zop1; URSHR Zresult.H, Pg/M, Zresult.H, #imm2
+ /// svuint16_t svrshr[_n_u16]_z(svbool_t pg, svuint16_t op1, uint64_t imm2)
+ /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; URSHR Zresult.H, Pg/M, Zresult.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts URSHR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_urshr, EA_SCALABLE, REG_V31, REG_P7, 64, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ushort> ShiftRightLogicalRounded(Vector<ushort> value, [ConstantExpected] byte count) => ShiftRightLogicalRounded(value, count);
+
+ /// <summary>
+ /// svuint32_t svrshr[_n_u32]_m(svbool_t pg, svuint32_t op1, uint64_t imm2)
+ /// URSHR Ztied1.S, Pg/M, Ztied1.S, #imm2
+ /// MOVPRFX Zresult, Zop1; URSHR Zresult.S, Pg/M, Zresult.S, #imm2
+ /// svuint32_t svrshr[_n_u32]_x(svbool_t pg, svuint32_t op1, uint64_t imm2)
+ /// URSHR Ztied1.S, Pg/M, Ztied1.S, #imm2
+ /// MOVPRFX Zresult, Zop1; URSHR Zresult.S, Pg/M, Zresult.S, #imm2
+ /// svuint32_t svrshr[_n_u32]_z(svbool_t pg, svuint32_t op1, uint64_t imm2)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; URSHR Zresult.S, Pg/M, Zresult.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts URSHR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_urshr, EA_SCALABLE, REG_V31, REG_P7, 64, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<uint> ShiftRightLogicalRounded(Vector<uint> value, [ConstantExpected] byte count) => ShiftRightLogicalRounded(value, count);
+
+ /// <summary>
+ /// svuint64_t svrshr[_n_u64]_m(svbool_t pg, svuint64_t op1, uint64_t imm2)
+ /// URSHR Ztied1.D, Pg/M, Ztied1.D, #imm2
+ /// MOVPRFX Zresult, Zop1; URSHR Zresult.D, Pg/M, Zresult.D, #imm2
+ /// svuint64_t svrshr[_n_u64]_x(svbool_t pg, svuint64_t op1, uint64_t imm2)
+ /// URSHR Ztied1.D, Pg/M, Ztied1.D, #imm2
+ /// MOVPRFX Zresult, Zop1; URSHR Zresult.D, Pg/M, Zresult.D, #imm2
+ /// svuint64_t svrshr[_n_u64]_z(svbool_t pg, svuint64_t op1, uint64_t imm2)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; URSHR Zresult.D, Pg/M, Zresult.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_AM_2A : right shifts URSHR <Zdn>.<T>, <Pg>/M, <Zdn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_urshr, EA_SCALABLE, REG_V31, REG_P7, 64, INS_OPTS_SCALABLE_D);
+ ///
+ /// Embedded arg1 mask predicate
+ /// </summary>
+ public static unsafe Vector<ulong> ShiftRightLogicalRounded(Vector<ulong> value, [ConstantExpected] byte count) => ShiftRightLogicalRounded(value, count);
+
+
+ /// ShiftRightLogicalRoundedAdd : Rounding shift right and accumulate
+
+ /// <summary>
+ /// svuint8_t svrsra[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3)
+ /// URSRA Ztied1.B, Zop2.B, #imm3
+ /// MOVPRFX Zresult, Zop1; URSRA Zresult.B, Zop2.B, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A URSRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<byte> ShiftRightLogicalRoundedAdd(Vector<byte> addend, Vector<byte> value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);
+
+ /// <summary>
+ /// svuint16_t svrsra[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3)
+ /// URSRA Ztied1.H, Zop2.H, #imm3
+ /// MOVPRFX Zresult, Zop1; URSRA Zresult.H, Zop2.H, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A URSRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ushort> ShiftRightLogicalRoundedAdd(Vector<ushort> addend, Vector<ushort> value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);
+
+ /// <summary>
+ /// svuint32_t svrsra[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3)
+ /// URSRA Ztied1.S, Zop2.S, #imm3
+ /// MOVPRFX Zresult, Zop1; URSRA Zresult.S, Zop2.S, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A URSRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<uint> ShiftRightLogicalRoundedAdd(Vector<uint> addend, Vector<uint> value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);
+
+ /// <summary>
+ /// svuint64_t svrsra[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3)
+ /// URSRA Ztied1.D, Zop2.D, #imm3
+ /// MOVPRFX Zresult, Zop1; URSRA Zresult.D, Zop2.D, #imm3
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FU_2A URSRA <Zda>.<T>, <Zn>.<T>, #<const>
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 8, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 3, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 4, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 16, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 7, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 8, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 32, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 17, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 16, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V0, 1, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V31, REG_V31, 64, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 31, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_I(INS_sve_ursra, EA_SCALABLE, REG_V0, REG_V31, 32, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<ulong> ShiftRightLogicalRoundedAdd(Vector<ulong> addend, Vector<ulong> value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);
+
+
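+ // Illustrative sketch (not generated output): with count = 1 the rounded
+ // accumulate gives a cheap "add half, rounded up" idiom, per element:
+ // addend[i] + ((value[i] + 1) >> 1). Hypothetical use, assuming the proposed
+ // Sve2 class:
+ //
+ //     total = Sve2.ShiftRightLogicalRoundedAdd(total, err, 1);  // total[i] += (err[i] + 1) / 2
+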
value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingEven(value, count); + + + /// ShiftRightLogicalRoundedNarrowingOdd : Rounding shift right narrow (top) + + /// + /// svint8_t svrshrnt[_n_s16](svint8_t even, svint16_t op1, uint64_t imm2) + /// RSHRNT Ztied.B, Zop1.H, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_GB_2A RSHRNT ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_rshrnt, EA_SCALABLE, REG_V2, REG_V3, 1, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingOdd(even, value, count); + + /// + /// svint16_t svrshrnt[_n_s32](svint16_t even, svint32_t op1, uint64_t imm2) + /// RSHRNT Ztied.H, Zop1.S, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_GB_2A RSHRNT ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_rshrnt, EA_SCALABLE, REG_V2, REG_V3, 1, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingOdd(even, value, count); + + /// + /// svint32_t svrshrnt[_n_s64](svint32_t even, svint64_t op1, uint64_t imm2) + /// RSHRNT Ztied.S, Zop1.D, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_GB_2A RSHRNT ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_rshrnt, EA_SCALABLE, REG_V2, REG_V3, 1, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingOdd(even, value, count); + + /// + /// svuint8_t svrshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2) + /// RSHRNT Ztied.B, Zop1.H, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_GB_2A RSHRNT ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_rshrnt, EA_SCALABLE, REG_V2, REG_V3, 1, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingOdd(even, value, count); + + /// + /// svuint16_t svrshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2) + /// RSHRNT Ztied.H, Zop1.S, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_GB_2A RSHRNT ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_rshrnt, EA_SCALABLE, REG_V2, REG_V3, 1, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingOdd(even, value, count); + + /// + /// svuint32_t svrshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2) + /// RSHRNT Ztied.S, Zop1.D, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_GB_2A RSHRNT ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_rshrnt, EA_SCALABLE, REG_V2, REG_V3, 1, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingOdd(even, value, count); + + + /// ShiftRightLogicalRoundedNarrowingSaturateEven : Saturating rounding shift right narrow (bottom) + + /// + /// svuint8_t svqrshrnb[_n_u16](svuint16_t op1, uint64_t imm2) + /// UQRSHRNB Zresult.B, Zop1.H, #imm2 + /// + /// codegenarm64test: + /// IF_SVE_GB_2A UQRSHRNB ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_uqrshrnb, EA_SCALABLE, REG_V24, REG_V25, 7, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, 
[ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingSaturateEven(value, count);
+
+ ///
+ /// svuint16_t svqrshrnb[_n_u32](svuint32_t op1, uint64_t imm2)
+ /// UQRSHRNB Zresult.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A UQRSHRNB ., ., #
+ /// theEmitter->emitIns_R_R_I(INS_sve_uqrshrnb, EA_SCALABLE, REG_V24, REG_V25, 7, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingSaturateEven(value, count);
+
+ ///
+ /// svuint32_t svqrshrnb[_n_u64](svuint64_t op1, uint64_t imm2)
+ /// UQRSHRNB Zresult.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A UQRSHRNB ., ., #
+ /// theEmitter->emitIns_R_R_I(INS_sve_uqrshrnb, EA_SCALABLE, REG_V24, REG_V25, 7, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingSaturateEven(value, count);
+
+
+ /// ShiftRightLogicalRoundedNarrowingSaturateOdd : Saturating rounding shift right narrow (top)
+
+ ///
+ /// svuint8_t svqrshrnt[_n_u16](svuint8_t even, svuint16_t op1, uint64_t imm2)
+ /// UQRSHRNT Ztied.B, Zop1.H, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A UQRSHRNT ., ., #
+ /// theEmitter->emitIns_R_R_I(INS_sve_uqrshrnt, EA_SCALABLE, REG_V26, REG_V27, 16, INS_OPTS_SCALABLE_H);
+ ///
+ public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingSaturateOdd(even, value, count);
+
+ ///
+ /// svuint16_t svqrshrnt[_n_u32](svuint16_t even, svuint32_t op1, uint64_t imm2)
+ /// UQRSHRNT Ztied.H, Zop1.S, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A UQRSHRNT ., ., #
+ /// theEmitter->emitIns_R_R_I(INS_sve_uqrshrnt, EA_SCALABLE, REG_V26, REG_V27, 16, INS_OPTS_SCALABLE_H);
+ ///
+ public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingSaturateOdd(even, value, count);
+
+ ///
+ /// svuint32_t svqrshrnt[_n_u64](svuint32_t even, svuint64_t op1, uint64_t imm2)
+ /// UQRSHRNT Ztied.S, Zop1.D, #imm2
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GB_2A UQRSHRNT ., ., #
+ /// theEmitter->emitIns_R_R_I(INS_sve_uqrshrnt, EA_SCALABLE, REG_V26, REG_V27, 16, INS_OPTS_SCALABLE_H);
+ ///
+ public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count) => ShiftRightLogicalRoundedNarrowingSaturateOdd(even, value, count);
+
+
+ /// SubtractHighNarrowingLower : Subtract narrow high part (bottom)
+
+ ///
+ /// svint8_t svsubhnb[_s16](svint16_t op1, svint16_t op2)
+ /// SUBHNB Zresult.B, Zop1.H, Zop2.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GC_3A SUBHNB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_subhnb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) => SubtractHighNarrowingLower(left, right);
+
+ ///
+ /// svint16_t svsubhnb[_s32](svint32_t op1, svint32_t op2)
+ /// SUBHNB Zresult.H, Zop1.S, Zop2.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GC_3A SUBHNB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_subhnb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) => SubtractHighNarrowingLower(left, right);
+
+ ///
+ /// svint32_t svsubhnb[_s64](svint64_t op1, svint64_t op2)
+ /// SUBHNB Zresult.S, Zop1.D, Zop2.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GC_3A SUBHNB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_subhnb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) => SubtractHighNarrowingLower(left, right);
+
+ ///
+ /// svuint8_t svsubhnb[_u16](svuint16_t op1, svuint16_t op2)
+ /// SUBHNB Zresult.B, Zop1.H, Zop2.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GC_3A SUBHNB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_subhnb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) => SubtractHighNarrowingLower(left, right);
+
+ ///
+ /// svuint16_t svsubhnb[_u32](svuint32_t op1, svuint32_t op2)
+ /// SUBHNB Zresult.H, Zop1.S, Zop2.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GC_3A SUBHNB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_subhnb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) => SubtractHighNarrowingLower(left, right);
+
+ ///
+ /// svuint32_t svsubhnb[_u64](svuint64_t op1, svuint64_t op2)
+ /// SUBHNB Zresult.S, Zop1.D, Zop2.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GC_3A SUBHNB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_subhnb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector SubtractHighNarrowingLower(Vector left, Vector right) => SubtractHighNarrowingLower(left, right);
+
+
+ /// SubtractHighNarrowingUpper : Subtract narrow high part (top)
+
+ ///
+ /// svint8_t svsubhnt[_s16](svint8_t even, svint16_t op1, svint16_t op2)
+ /// SUBHNT Ztied.B, Zop1.H, Zop2.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GC_3A SUBHNT ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_subhnt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_H);
+ ///
+ public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) => SubtractHighNarrowingUpper(even, left, right);
+
+ ///
+ /// svint16_t svsubhnt[_s32](svint16_t even, svint32_t op1, svint32_t op2)
+ /// SUBHNT Ztied.H, Zop1.S, Zop2.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GC_3A SUBHNT ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_subhnt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_H);
+ ///
+ public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) => SubtractHighNarrowingUpper(even, left, right);
+
+ ///
+ /// svint32_t svsubhnt[_s64](svint32_t even, svint64_t op1, svint64_t op2)
+ /// SUBHNT Ztied.S, Zop1.D, Zop2.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GC_3A SUBHNT ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_subhnt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_H);
+ ///
+ public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) => SubtractHighNarrowingUpper(even, left, right);
+
+ ///
+ /// svuint8_t svsubhnt[_u16](svuint8_t even, svuint16_t op1, svuint16_t op2)
+ /// SUBHNT Ztied.B, Zop1.H, Zop2.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GC_3A SUBHNT ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_subhnt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_H);
+ ///
+ public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) => SubtractHighNarrowingUpper(even, left, right);
+
+ ///
+ /// svuint16_t svsubhnt[_u32](svuint16_t even, svuint32_t op1, svuint32_t op2)
+ /// SUBHNT Ztied.H, Zop1.S, Zop2.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GC_3A SUBHNT ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_subhnt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_H);
+ ///
+ public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) => SubtractHighNarrowingUpper(even, left, right);
+
+ ///
+ /// svuint32_t svsubhnt[_u64](svuint32_t even, svuint64_t op1, svuint64_t op2)
+ /// SUBHNT Ztied.S, Zop1.D, Zop2.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GC_3A SUBHNT ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_subhnt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_H);
+ ///
+ public static unsafe Vector SubtractHighNarrowingUpper(Vector even, Vector left, Vector right) => SubtractHighNarrowingUpper(even, left, right);
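+
+ // Example (editor's sketch, illustrative only; the "Sve2" class name is an
+ // assumption, not confirmed by this file): the Lower/Upper pair builds one
+ // fully narrowed result in two steps, SUBHNB writing the even lanes and
+ // SUBHNT merging the odd lanes into that same destination:
+ //   Vector even = Sve2.SubtractHighNarrowingLower(left, right);       // even lanes = high half of (left - right)
+ //   Vector both = Sve2.SubtractHighNarrowingUpper(even, left, right); // odd lanes merged alongside the even lanes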
+
+
+ /// SubtractSaturate : Saturating subtract
+
+ ///
+ /// svint8_t svqsub[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// SQSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ /// MOVPRFX Zresult, Zop1; SQSUB Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// svint8_t svqsub[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// SQSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ /// SQSUBR Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+ /// SQSUB Zresult.B, Zop1.B, Zop2.B
+ /// svint8_t svqsub[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; SQSUB Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; SQSUBR Zresult.B, Pg/M, Zresult.B, Zop1.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_ET_3A SQSUB ., /M, ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V29, REG_P0, REG_V24, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_AT_3A SQSUB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V7, REG_V0, REG_V31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_EC_1A SQSUB ., ., #{, }
+ /// theEmitter->emitIns_R_I(INS_sve_sqsub, EA_SCALABLE, REG_V2, 1, INS_OPTS_SCALABLE_S);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right);
+
+ ///
+ /// svint16_t svqsub[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// SQSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ /// MOVPRFX Zresult, Zop1; SQSUB Zresult.H, Pg/M, Zresult.H, Zop2.H
+ /// svint16_t svqsub[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// SQSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ /// SQSUBR Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+ /// SQSUB Zresult.H, Zop1.H, Zop2.H
+ /// svint16_t svqsub[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; SQSUB Zresult.H, Pg/M, Zresult.H, Zop2.H
+ /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; SQSUBR Zresult.H, Pg/M, Zresult.H, Zop1.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_ET_3A SQSUB ., /M, ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V29, REG_P0, REG_V24, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_AT_3A SQSUB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V7, REG_V0, REG_V31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_EC_1A SQSUB ., ., #{, }
+ /// theEmitter->emitIns_R_I(INS_sve_sqsub, EA_SCALABLE, REG_V2, 1, INS_OPTS_SCALABLE_S);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right);
+
+ ///
+ /// svint32_t svqsub[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// SQSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ /// MOVPRFX Zresult, Zop1; SQSUB Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// svint32_t svqsub[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// SQSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ /// SQSUBR Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+ /// SQSUB Zresult.S, Zop1.S, Zop2.S
+ /// svint32_t svqsub[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; SQSUB Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; SQSUBR Zresult.S, Pg/M, Zresult.S, Zop1.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_ET_3A SQSUB ., /M, ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V29, REG_P0, REG_V24, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_AT_3A SQSUB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V7, REG_V0, REG_V31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_EC_1A SQSUB ., ., #{, }
+ /// theEmitter->emitIns_R_I(INS_sve_sqsub, EA_SCALABLE, REG_V2, 1, INS_OPTS_SCALABLE_S);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right);
+
+ ///
+ /// svint64_t svqsub[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// SQSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ /// MOVPRFX Zresult, Zop1; SQSUB Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// svint64_t svqsub[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// SQSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ /// SQSUBR Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+ /// SQSUB Zresult.D, Zop1.D, Zop2.D
+ /// svint64_t svqsub[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SQSUB Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; SQSUBR Zresult.D, Pg/M, Zresult.D, Zop1.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_ET_3A SQSUB ., /M, ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V29, REG_P0, REG_V24, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_AT_3A SQSUB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqsub, EA_SCALABLE, REG_V7, REG_V0, REG_V31, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A SQSUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_sqsub, EA_SCALABLE, REG_V2, 1, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svuint8_t svqsub[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UQSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// MOVPRFX Zresult, Zop1; UQSUB Zresult.B, Pg/M, Zresult.B, Zop2.B + /// svuint8_t svqsub[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// UQSUB Ztied1.B, Pg/M, Ztied1.B, Zop2.B + /// UQSUBR Ztied2.B, Pg/M, Ztied2.B, Zop1.B + /// UQSUB Zresult.B, Zop1.B, Zop2.B + /// svuint8_t svqsub[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2) + /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; UQSUB Zresult.B, Pg/M, Zresult.B, Zop2.B + /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; UQSUBR Zresult.B, Pg/M, Zresult.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V1, REG_P4, REG_V28, INS_OPTS_SCALABLE_D); + /// IF_SVE_AT_3A UQSUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V31, REG_V31, REG_V31, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A UQSUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_uqsub, EA_SCALABLE, REG_V6, 255, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_SHIFT); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svuint16_t svqsub[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UQSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; UQSUB Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svuint16_t svqsub[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// UQSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// UQSUBR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// UQSUB Zresult.H, Zop1.H, Zop2.H + /// svuint16_t svqsub[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; UQSUB Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; UQSUBR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V1, REG_P4, REG_V28, INS_OPTS_SCALABLE_D); + /// IF_SVE_AT_3A UQSUB ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V31, REG_V31, REG_V31, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A UQSUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_uqsub, EA_SCALABLE, REG_V6, 255, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_SHIFT); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svuint32_t svqsub[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UQSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UQSUB Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svqsub[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UQSUB Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// UQSUBR Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// UQSUB Zresult.S, Zop1.S, Zop2.S + /// svuint32_t svqsub[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; UQSUB Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; UQSUBR Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V1, REG_P4, REG_V28, INS_OPTS_SCALABLE_D); + /// IF_SVE_AT_3A UQSUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V31, REG_V31, REG_V31, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_EC_1A UQSUB ., ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_uqsub, EA_SCALABLE, REG_V6, 255, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_SHIFT); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right); + + /// + /// svuint64_t svqsub[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UQSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UQSUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svqsub[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UQSUB Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// UQSUBR Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// UQSUB Zresult.D, Zop1.D, Zop2.D + /// svuint64_t svqsub[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; UQSUB Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; UQSUBR Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V1, REG_P4, REG_V28, INS_OPTS_SCALABLE_D); + /// IF_SVE_AT_3A UQSUB ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uqsub, EA_SCALABLE, REG_V31, REG_V31, REG_V31, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_EC_1A UQSUB ., ., #{, }
+ /// theEmitter->emitIns_R_I(INS_sve_uqsub, EA_SCALABLE, REG_V6, 255, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_SHIFT);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector SubtractSaturate(Vector left, Vector right) => SubtractSaturate(left, right);
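+
+ // Example (editor's sketch, illustrative only; the "Sve2" class name is an
+ // assumption): saturating subtraction clamps instead of wrapping, so unsigned
+ // lanes floor at zero and signed lanes stop at the element type's
+ // MinValue/MaxValue:
+ //   Vector diff = Sve2.SubtractSaturate(left, right); // e.g. byte lanes 10 - 20 => 0, not 246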
+
+
+ /// SubtractSaturateReversed : Saturating subtract reversed
+
+ ///
+ /// svint8_t svqsubr[_s8]_m(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// SQSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ /// MOVPRFX Zresult, Zop1; SQSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// svint8_t svqsubr[_s8]_x(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// SQSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ /// SQSUB Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+ /// SQSUB Zresult.B, Zop2.B, Zop1.B
+ /// svint8_t svqsubr[_s8]_z(svbool_t pg, svint8_t op1, svint8_t op2)
+ /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; SQSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; SQSUB Zresult.B, Pg/M, Zresult.B, Zop1.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_ET_3A SQSUBR ., /M, ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqsubr, EA_SCALABLE, REG_V30, REG_P1, REG_V25, INS_OPTS_SCALABLE_H);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right);
+
+ ///
+ /// svint16_t svqsubr[_s16]_m(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// SQSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ /// MOVPRFX Zresult, Zop1; SQSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H
+ /// svint16_t svqsubr[_s16]_x(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// SQSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ /// SQSUB Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+ /// SQSUB Zresult.H, Zop2.H, Zop1.H
+ /// svint16_t svqsubr[_s16]_z(svbool_t pg, svint16_t op1, svint16_t op2)
+ /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; SQSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H
+ /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; SQSUB Zresult.H, Pg/M, Zresult.H, Zop1.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_ET_3A SQSUBR ., /M, ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqsubr, EA_SCALABLE, REG_V30, REG_P1, REG_V25, INS_OPTS_SCALABLE_H);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right);
+
+ ///
+ /// svint32_t svqsubr[_s32]_m(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// SQSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ /// MOVPRFX Zresult, Zop1; SQSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// svint32_t svqsubr[_s32]_x(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// SQSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S
+ /// SQSUB Ztied2.S, Pg/M, Ztied2.S, Zop1.S
+ /// SQSUB Zresult.S, Zop2.S, Zop1.S
+ /// svint32_t svqsubr[_s32]_z(svbool_t pg, svint32_t op1, svint32_t op2)
+ /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; SQSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S
+ /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; SQSUB Zresult.S, Pg/M, Zresult.S, Zop1.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_ET_3A SQSUBR ., /M, ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqsubr, EA_SCALABLE, REG_V30, REG_P1, REG_V25, INS_OPTS_SCALABLE_H);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right);
+
+ ///
+ /// svint64_t svqsubr[_s64]_m(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// SQSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ /// MOVPRFX Zresult, Zop1; SQSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// svint64_t svqsubr[_s64]_x(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// SQSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D
+ /// SQSUB Ztied2.D, Pg/M, Ztied2.D, Zop1.D
+ /// SQSUB Zresult.D, Zop2.D, Zop1.D
+ /// svint64_t svqsubr[_s64]_z(svbool_t pg, svint64_t op1, svint64_t op2)
+ /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; SQSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D
+ /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; SQSUB Zresult.D, Pg/M, Zresult.D, Zop1.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_ET_3A SQSUBR ., /M, ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sqsubr, EA_SCALABLE, REG_V30, REG_P1, REG_V25, INS_OPTS_SCALABLE_H);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right);
+
+ ///
+ /// svuint8_t svqsubr[_u8]_m(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// UQSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ /// MOVPRFX Zresult, Zop1; UQSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// svuint8_t svqsubr[_u8]_x(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// UQSUBR Ztied1.B, Pg/M, Ztied1.B, Zop2.B
+ /// UQSUB Ztied2.B, Pg/M, Ztied2.B, Zop1.B
+ /// UQSUB Zresult.B, Zop2.B, Zop1.B
+ /// svuint8_t svqsubr[_u8]_z(svbool_t pg, svuint8_t op1, svuint8_t op2)
+ /// MOVPRFX Zresult.B, Pg/Z, Zop1.B; UQSUBR Zresult.B, Pg/M, Zresult.B, Zop2.B
+ /// MOVPRFX Zresult.B, Pg/Z, Zop2.B; UQSUB Zresult.B, Pg/M, Zresult.B, Zop1.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_ET_3A UQSUBR ., /M, ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_uqsubr, EA_SCALABLE, REG_V2, REG_P5, REG_V29, INS_OPTS_SCALABLE_B);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right);
+
+ ///
+ /// svuint16_t svqsubr[_u16]_m(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// UQSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ /// MOVPRFX Zresult, Zop1; UQSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H
+ /// svuint16_t svqsubr[_u16]_x(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// UQSUBR Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ /// UQSUB Ztied2.H, Pg/M, Ztied2.H, Zop1.H
+ /// UQSUB Zresult.H, Zop2.H, Zop1.H
+ /// svuint16_t svqsubr[_u16]_z(svbool_t pg, svuint16_t op1, svuint16_t op2)
+ /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; UQSUBR Zresult.H, Pg/M, Zresult.H, Zop2.H
+ /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; UQSUB Zresult.H, Pg/M, Zresult.H, Zop1.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_ET_3A UQSUBR ., /M, ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_uqsubr, EA_SCALABLE, REG_V2, REG_P5, REG_V29, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right); + + /// + /// svuint32_t svqsubr[_u32]_m(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UQSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// MOVPRFX Zresult, Zop1; UQSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// svuint32_t svqsubr[_u32]_x(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// UQSUBR Ztied1.S, Pg/M, Ztied1.S, Zop2.S + /// UQSUB Ztied2.S, Pg/M, Ztied2.S, Zop1.S + /// UQSUB Zresult.S, Zop2.S, Zop1.S + /// svuint32_t svqsubr[_u32]_z(svbool_t pg, svuint32_t op1, svuint32_t op2) + /// MOVPRFX Zresult.S, Pg/Z, Zop1.S; UQSUBR Zresult.S, Pg/M, Zresult.S, Zop2.S + /// MOVPRFX Zresult.S, Pg/Z, Zop2.S; UQSUB Zresult.S, Pg/M, Zresult.S, Zop1.S + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQSUBR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsubr, EA_SCALABLE, REG_V2, REG_P5, REG_V29, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right); + + /// + /// svuint64_t svqsubr[_u64]_m(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UQSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; UQSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// svuint64_t svqsubr[_u64]_x(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// UQSUBR Ztied1.D, Pg/M, Ztied1.D, Zop2.D + /// UQSUB Ztied2.D, Pg/M, Ztied2.D, Zop1.D + /// UQSUB Zresult.D, Zop2.D, Zop1.D + /// svuint64_t svqsubr[_u64]_z(svbool_t pg, svuint64_t op1, svuint64_t op2) + /// MOVPRFX Zresult.D, Pg/Z, Zop1.D; UQSUBR Zresult.D, Pg/M, Zresult.D, Zop2.D + /// MOVPRFX Zresult.D, Pg/Z, Zop2.D; UQSUB Zresult.D, Pg/M, Zresult.D, Zop1.D + /// + /// codegenarm64test: + /// IF_SVE_ET_3A UQSUBR ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uqsubr, EA_SCALABLE, REG_V2, REG_P5, REG_V29, INS_OPTS_SCALABLE_B); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector SubtractSaturateReversed(Vector left, Vector right) => SubtractSaturateReversed(left, right); + + + /// SubtractWideLower : Subtract wide (bottom) + + /// + /// svint16_t svsubwb[_s16](svint16_t op1, svint8_t op2) + /// SSUBWB Zresult.H, Zop1.H, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FM_3A SSUBWB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ssubwb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) => SubtractWideLower(left, right); + + /// + /// svint32_t svsubwb[_s32](svint32_t op1, svint16_t op2) + /// SSUBWB Zresult.S, Zop1.S, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FM_3A SSUBWB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ssubwb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) => SubtractWideLower(left, right); + + /// + /// svint64_t svsubwb[_s64](svint64_t op1, svint32_t op2) + /// SSUBWB Zresult.D, Zop1.D, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FM_3A SSUBWB ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_ssubwb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) => SubtractWideLower(left, right); + + /// + /// svuint16_t svsubwb[_u16](svuint16_t op1, svuint8_t op2) + /// USUBWB Zresult.H, Zop1.H, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FM_3A USUBWB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_usubwb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) => SubtractWideLower(left, right); + + /// + /// svuint32_t svsubwb[_u32](svuint32_t op1, svuint16_t op2) + /// USUBWB Zresult.S, Zop1.S, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FM_3A USUBWB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_usubwb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) => SubtractWideLower(left, right); + + /// + /// svuint64_t svsubwb[_u64](svuint64_t op1, svuint32_t op2) + /// USUBWB Zresult.D, Zop1.D, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FM_3A USUBWB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_usubwb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector SubtractWideLower(Vector left, Vector right) => SubtractWideLower(left, right); + + + /// SubtractWideUpper : Subtract wide (top) + + /// + /// svint16_t svsubwt[_s16](svint16_t op1, svint8_t op2) + /// SSUBWT Zresult.H, Zop1.H, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FM_3A SSUBWT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ssubwt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) => SubtractWideUpper(left, right); + + /// + /// svint32_t svsubwt[_s32](svint32_t op1, svint16_t op2) + /// SSUBWT Zresult.S, Zop1.S, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FM_3A SSUBWT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ssubwt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) => SubtractWideUpper(left, right); + + /// + /// svint64_t svsubwt[_s64](svint64_t op1, svint32_t op2) + /// SSUBWT Zresult.D, Zop1.D, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FM_3A SSUBWT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ssubwt, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) => SubtractWideUpper(left, right); + + /// + /// svuint16_t svsubwt[_u16](svuint16_t op1, svuint8_t op2) + /// USUBWT Zresult.H, Zop1.H, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FM_3A USUBWT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_usubwt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) => SubtractWideUpper(left, right); + + /// + /// svuint32_t svsubwt[_u32](svuint32_t op1, svuint16_t op2) + /// USUBWT Zresult.S, Zop1.S, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FM_3A USUBWT ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_usubwt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) => SubtractWideUpper(left, right); + + /// + /// svuint64_t svsubwt[_u64](svuint64_t op1, svuint32_t op2) + /// USUBWT Zresult.D, Zop1.D, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FM_3A USUBWT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_usubwt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractWideUpper(Vector left, Vector right) => SubtractWideUpper(left, right); + + + /// SubtractWideningLower : Subtract long (bottom) + + /// + /// svint16_t svsublb[_s16](svint8_t op1, svint8_t op2) + /// SSUBLB Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SSUBLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ssublb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) => SubtractWideningLower(left, right); + + /// + /// svint32_t svsublb[_s32](svint16_t op1, svint16_t op2) + /// SSUBLB Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SSUBLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ssublb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) => SubtractWideningLower(left, right); + + /// + /// svint64_t svsublb[_s64](svint32_t op1, svint32_t op2) + /// SSUBLB Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SSUBLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ssublb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) => SubtractWideningLower(left, right); + + /// + /// svuint16_t svsublb[_u16](svuint8_t op1, svuint8_t op2) + /// USUBLB Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FL_3A USUBLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_usublb, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) => SubtractWideningLower(left, right); + + /// + /// svuint32_t svsublb[_u32](svuint16_t op1, svuint16_t op2) + /// USUBLB Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FL_3A USUBLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_usublb, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) => SubtractWideningLower(left, right); + + /// + /// svuint64_t svsublb[_u64](svuint32_t op1, svuint32_t op2) + /// USUBLB Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FL_3A USUBLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_usublb, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractWideningLower(Vector left, Vector right) => SubtractWideningLower(left, right); + + + /// SubtractWideningLowerUpper : Subtract long (bottom - top) + + /// + /// svint16_t svsublbt[_s16](svint8_t op1, svint8_t op2) + /// SSUBLBT Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FS_3A SSUBLBT ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_ssublbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractWideningLowerUpper(Vector left, Vector right) => SubtractWideningLowerUpper(left, right); + + /// + /// svint32_t svsublbt[_s32](svint16_t op1, svint16_t op2) + /// SSUBLBT Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FS_3A SSUBLBT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ssublbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractWideningLowerUpper(Vector left, Vector right) => SubtractWideningLowerUpper(left, right); + + /// + /// svint64_t svsublbt[_s64](svint32_t op1, svint32_t op2) + /// SSUBLBT Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FS_3A SSUBLBT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ssublbt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector SubtractWideningLowerUpper(Vector left, Vector right) => SubtractWideningLowerUpper(left, right); + + + /// SubtractWideningUpper : Subtract long (top) + + /// + /// svint16_t svsublt[_s16](svint8_t op1, svint8_t op2) + /// SSUBLT Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SSUBLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ssublt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) => SubtractWideningUpper(left, right); + + /// + /// svint32_t svsublt[_s32](svint16_t op1, svint16_t op2) + /// SSUBLT Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SSUBLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ssublt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) => SubtractWideningUpper(left, right); + + /// + /// svint64_t svsublt[_s64](svint32_t op1, svint32_t op2) + /// SSUBLT Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FL_3A SSUBLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ssublt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) => SubtractWideningUpper(left, right); + + /// + /// svuint16_t svsublt[_u16](svuint8_t op1, svuint8_t op2) + /// USUBLT Zresult.H, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FL_3A USUBLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_usublt, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) => SubtractWideningUpper(left, right); + + /// + /// svuint32_t svsublt[_u32](svuint16_t op1, svuint16_t op2) + /// USUBLT Zresult.S, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FL_3A USUBLT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_usublt, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) => SubtractWideningUpper(left, right); + + /// + /// svuint64_t svsublt[_u64](svuint32_t op1, svuint32_t op2) + /// USUBLT Zresult.D, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FL_3A USUBLT ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_usublt, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector SubtractWideningUpper(Vector left, Vector right) => SubtractWideningUpper(left, right);
+
+
+ /// SubtractWideningUpperLower : Subtract long (top - bottom)
+
+ ///
+ /// svint16_t svsubltb[_s16](svint8_t op1, svint8_t op2)
+ /// SSUBLTB Zresult.H, Zop1.B, Zop2.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FS_3A SSUBLTB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_ssubltb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector SubtractWideningUpperLower(Vector left, Vector right) => SubtractWideningUpperLower(left, right);
+
+ ///
+ /// svint32_t svsubltb[_s32](svint16_t op1, svint16_t op2)
+ /// SSUBLTB Zresult.S, Zop1.H, Zop2.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FS_3A SSUBLTB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_ssubltb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector SubtractWideningUpperLower(Vector left, Vector right) => SubtractWideningUpperLower(left, right);
+
+ ///
+ /// svint64_t svsubltb[_s64](svint32_t op1, svint32_t op2)
+ /// SSUBLTB Zresult.D, Zop1.S, Zop2.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FS_3A SSUBLTB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_ssubltb, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector SubtractWideningUpperLower(Vector left, Vector right) => SubtractWideningUpperLower(left, right);
+
+
+ /// SubtractWithBorrowWideningLower : Subtract with borrow long (bottom)
+
+ ///
+ /// svuint32_t svsbclb[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3)
+ /// SBCLB Ztied1.S, Zop2.S, Zop3.S
+ /// MOVPRFX Zresult, Zop1; SBCLB Zresult.S, Zop2.S, Zop3.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FY_3A SBCLB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sbclb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_sbclb, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector SubtractWithBorrowWideningLower(Vector op1, Vector op2, Vector op3) => SubtractWithBorrowWideningLower(op1, op2, op3);
+
+ ///
+ /// svuint64_t svsbclb[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3)
+ /// SBCLB Ztied1.D, Zop2.D, Zop3.D
+ /// MOVPRFX Zresult, Zop1; SBCLB Zresult.D, Zop2.D, Zop3.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FY_3A SBCLB ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sbclb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_sbclb, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector SubtractWithBorrowWideningLower(Vector op1, Vector op2, Vector op3) => SubtractWithBorrowWideningLower(op1, op2, op3);
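+
+ // Example (editor's sketch, illustrative only; the "Sve2" class name is an
+ // assumption): SBCLB and its top-half counterpart SBCLT (next section)
+ // propagate a borrow between paired lanes, which lets a subtraction wider
+ // than one element, e.g. 128-bit values split across adjacent 64-bit lanes,
+ // be chained across the two calls:
+ //   Vector lowHalves  = Sve2.SubtractWithBorrowWideningLower(op1, op2, borrowIn);
+ //   Vector highHalves = Sve2.SubtractWithBorrowWideningUpper(op1, op2, borrowIn); // see below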
+
+
+ /// SubtractWithBorrowWideningUpper : Subtract with borrow long (top)
+
+ ///
+ /// svuint32_t svsbclt[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3)
+ /// SBCLT Ztied1.S, Zop2.S, Zop3.S
+ /// MOVPRFX Zresult, Zop1; SBCLT Zresult.S, Zop2.S, Zop3.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FY_3A SBCLT ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sbclt, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_sbclt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector SubtractWithBorrowWideningUpper(Vector op1, Vector op2, Vector op3) => SubtractWithBorrowWideningUpper(op1, op2, op3);
+
+ ///
+ /// svuint64_t svsbclt[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3)
+ /// SBCLT Ztied1.D, Zop2.D, Zop3.D
+ /// MOVPRFX Zresult, Zop1; SBCLT Zresult.D, Zop2.D, Zop3.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_FY_3A SBCLT ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_sbclt, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_sbclt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector SubtractWithBorrowWideningUpper(Vector op1, Vector op2, Vector op3) => SubtractWithBorrowWideningUpper(op1, op2, op3);
+
+
+ /// UpConvertWideningUpper : Up convert long (top)
+
+ ///
+ /// svfloat64_t svcvtlt_f64[_f32]_m(svfloat64_t inactive, svbool_t pg, svfloat32_t op)
+ /// FCVTLT Ztied.D, Pg/M, Zop.S
+ /// svfloat64_t svcvtlt_f64[_f32]_x(svbool_t pg, svfloat32_t op)
+ /// FCVTLT Ztied.D, Pg/M, Ztied.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_GQ_3A FCVTLT .D, /M, .S
+ /// theEmitter->emitIns_R_R_R(INS_sve_fcvtlt, EA_SCALABLE, REG_V0, REG_P7, REG_V1, INS_OPTS_S_TO_D);
+ /// theEmitter->emitIns_R_R_R(INS_sve_fcvtlt, EA_SCALABLE, REG_V14, REG_P7, REG_V20, INS_OPTS_H_TO_S);
+ ///
+ /// Embedded arg1 mask predicate
+ ///
+ public static unsafe Vector UpConvertWideningUpper(Vector value) => UpConvertWideningUpper(value);
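+
+ // Example (editor's sketch, illustrative only; the "Sve2" class name is an
+ // assumption): FCVTLT widens the odd-indexed ("top") single-precision lanes,
+ // so converting a whole float vector to double takes this call for the odd
+ // lanes plus a separate even-lane conversion:
+ //   Vector oddAsDoubles = Sve2.UpConvertWideningUpper(singles); // odd float lanes -> double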
+
+
+ /// VectorTableLookup : Table lookup in two-vector table
+
+ ///
+ /// svint8_t svtbl2[_s8](svint8x2_t data, svuint8_t indices)
+ /// TBL Zresult.B, {Zdata0.B, Zdata1.B}, Zindices.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BZ_3A TBL ., {.}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_BZ_3A_A TBL ., {., .}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ ///
+ public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) data, Vector indices) => VectorTableLookup(data, indices);
+
+ ///
+ /// svint16_t svtbl2[_s16](svint16x2_t data, svuint16_t indices)
+ /// TBL Zresult.H, {Zdata0.H, Zdata1.H}, Zindices.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BZ_3A TBL ., {.}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_BZ_3A_A TBL ., {., .}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ ///
+ public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) data, Vector indices) => VectorTableLookup(data, indices);
+
+ ///
+ /// svint32_t svtbl2[_s32](svint32x2_t data, svuint32_t indices)
+ /// TBL Zresult.S, {Zdata0.S, Zdata1.S}, Zindices.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BZ_3A TBL ., {.}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_BZ_3A_A TBL ., {., .}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ ///
+ public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) data, Vector indices) => VectorTableLookup(data, indices);
+
+ ///
+ /// svint64_t svtbl2[_s64](svint64x2_t data, svuint64_t indices)
+ /// TBL Zresult.D, {Zdata0.D, Zdata1.D}, Zindices.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BZ_3A TBL ., {.}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_BZ_3A_A TBL ., {., .}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ ///
+ public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) data, Vector indices) => VectorTableLookup(data, indices);
+
+ ///
+ /// svuint8_t svtbl2[_u8](svuint8x2_t data, svuint8_t indices)
+ /// TBL Zresult.B, {Zdata0.B, Zdata1.B}, Zindices.B
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BZ_3A TBL ., {.}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_BZ_3A_A TBL ., {., .}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ ///
+ public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) data, Vector indices) => VectorTableLookup(data, indices);
+
+ ///
+ /// svuint16_t svtbl2[_u16](svuint16x2_t data, svuint16_t indices)
+ /// TBL Zresult.H, {Zdata0.H, Zdata1.H}, Zindices.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BZ_3A TBL ., {.}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_BZ_3A_A TBL ., {., .}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ ///
+ public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) data, Vector indices) => VectorTableLookup(data, indices);
+
+ ///
+ /// svuint32_t svtbl2[_u32](svuint32x2_t data, svuint32_t indices)
+ /// TBL Zresult.S, {Zdata0.S, Zdata1.S}, Zindices.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BZ_3A TBL ., {.}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_BZ_3A_A TBL ., {., .}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ ///
+ public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) data, Vector indices) => VectorTableLookup(data, indices);
+
+ ///
+ /// svuint64_t svtbl2[_u64](svuint64x2_t data, svuint64_t indices)
+ /// TBL Zresult.D, {Zdata0.D, Zdata1.D}, Zindices.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BZ_3A TBL ., {.}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_BZ_3A_A TBL ., {., .}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ ///
+ public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) data, Vector indices) => VectorTableLookup(data, indices);
+
+ ///
+ /// svfloat32_t svtbl2[_f32](svfloat32x2_t data, svuint32_t indices)
+ /// TBL Zresult.S, {Zdata0.S, Zdata1.S}, Zindices.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BZ_3A TBL ., {.}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_BZ_3A_A TBL ., {., .}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ ///
+ public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) data, Vector indices) => VectorTableLookup(data, indices);
+
+ ///
+ /// svfloat64_t svtbl2[_f64](svfloat64x2_t data, svuint64_t indices)
+ /// TBL Zresult.D, {Zdata0.D, Zdata1.D}, Zindices.D
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BZ_3A TBL ., {.}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_BZ_3A_A TBL ., {., .}, .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ ///
+ public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) data, Vector indices) => VectorTableLookup(data, indices);
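+
+ // Example (editor's sketch, illustrative only; the "Sve2" class name is an
+ // assumption): the tuple overload treats (data1, data2) as one table of twice
+ // the vector length; an index selects from data1 first, then data2, and any
+ // index beyond the combined table produces zero in that lane:
+ //   Vector picked = Sve2.VectorTableLookup((table0, table1), indices);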
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svint16_t svtbx[_s16](svint16_t fallback, svint16_t data, svuint16_t indices) + /// TBX Ztied.H, Zdata.H, Zindices.H + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBX ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svint32_t svtbx[_s32](svint32_t fallback, svint32_t data, svuint32_t indices) + /// TBX Ztied.S, Zdata.S, Zindices.S + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBX ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svint64_t svtbx[_s64](svint64_t fallback, svint64_t data, svuint64_t indices) + /// TBX Ztied.D, Zdata.D, Zindices.D + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBX ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svuint8_t svtbx[_u8](svuint8_t fallback, svuint8_t data, svuint8_t indices) + /// TBX Ztied.B, Zdata.B, Zindices.B + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBX ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svuint16_t svtbx[_u16](svuint16_t fallback, svuint16_t data, svuint16_t indices) + /// TBX Ztied.H, Zdata.H, Zindices.H + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBX ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svuint32_t svtbx[_u32](svuint32_t fallback, svuint32_t data, svuint32_t indices) + /// TBX Ztied.S, Zdata.S, Zindices.S + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBX ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svuint64_t svtbx[_u64](svuint64_t fallback, svuint64_t data, svuint64_t indices) + /// TBX Ztied.D, Zdata.D, Zindices.D + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBX ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svfloat32_t svtbx[_f32](svfloat32_t fallback, svfloat32_t data, svuint32_t indices) + /// TBX Ztied.S, Zdata.S, Zindices.S + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBX ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + /// + /// svfloat64_t svtbx[_f64](svfloat64_t fallback, svfloat64_t data, svuint64_t indices) + /// TBX Ztied.D, Zdata.D, Zindices.D + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBX ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices); + + + /// Xor : Bitwise exclusive OR of three vectors + + /// + /// svint8_t sveor3[_s8](svint8_t op1, svint8_t op2, svint8_t op3) + /// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D + /// EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + /// + /// svint16_t sveor3[_s16](svint16_t op1, svint16_t op2, svint16_t op3) + /// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D + /// EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + /// + /// svint32_t sveor3[_s32](svint32_t op1, svint32_t op2, svint32_t op3) + /// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D + /// EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D + /// MOVPRFX Zresult, 
Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + /// + /// svint64_t sveor3[_s64](svint64_t op1, svint64_t op2, svint64_t op3) + /// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D + /// EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + /// + /// svuint8_t sveor3[_u8](svuint8_t op1, svuint8_t op2, svuint8_t op3) + /// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D + /// EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + /// + /// svuint16_t sveor3[_u16](svuint16_t op1, svuint16_t op2, svuint16_t op3) + /// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D + /// EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + /// + /// svuint32_t sveor3[_u32](svuint32_t op1, svuint32_t op2, svuint32_t op3) + /// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D + /// EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + /// + /// svuint64_t sveor3[_u64](svuint64_t op1, svuint64_t op2, svuint64_t op3) + /// EOR3 Ztied1.D, Ztied1.D, Zop2.D, Zop3.D + /// EOR3 Ztied2.D, Ztied2.D, Zop3.D, Zop1.D + /// EOR3 Ztied3.D, Ztied3.D, Zop1.D, Zop2.D + /// MOVPRFX Zresult, Zop1; EOR3 Zresult.D, Zresult.D, Zop2.D, Zop3.D + /// + /// codegenarm64test: + /// IF_SVE_AV_3A EOR3 .D, .D, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_eor3, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3) => Xor(value1, value2, value3); + + + /// XorRotateRight : Bitwise exclusive OR and rotate right + + /// + /// svint8_t svxar[_n_s8](svint8_t op1, svint8_t op2, uint64_t imm3) + /// XAR Ztied1.B, Ztied1.B, Zop2.B, #imm3 + /// XAR Ztied2.B, Ztied2.B, Zop1.B, #imm3 + /// MOVPRFX Zresult, Zop1; XAR Zresult.B, 
Zresult.B, Zop2.B, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_AW_2A XAR ., ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V2, REG_V3, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V4, REG_V5, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V6, REG_V7, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V8, REG_V9, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V10, REG_V11, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V12, REG_V13, 4, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V14, REG_V15, 64, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + /// + /// svint16_t svxar[_n_s16](svint16_t op1, svint16_t op2, uint64_t imm3) + /// XAR Ztied1.H, Ztied1.H, Zop2.H, #imm3 + /// XAR Ztied2.H, Ztied2.H, Zop1.H, #imm3 + /// MOVPRFX Zresult, Zop1; XAR Zresult.H, Zresult.H, Zop2.H, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_AW_2A XAR ., ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V2, REG_V3, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V4, REG_V5, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V6, REG_V7, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V8, REG_V9, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V10, REG_V11, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V12, REG_V13, 4, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V14, REG_V15, 64, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + /// + /// svint32_t svxar[_n_s32](svint32_t op1, svint32_t op2, uint64_t imm3) + /// XAR Ztied1.S, Ztied1.S, Zop2.S, #imm3 + /// XAR Ztied2.S, Ztied2.S, Zop1.S, #imm3 + /// MOVPRFX Zresult, Zop1; XAR Zresult.S, Zresult.S, Zop2.S, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_AW_2A XAR ., ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V2, REG_V3, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V4, REG_V5, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V6, REG_V7, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V8, REG_V9, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V10, REG_V11, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V12, REG_V13, 4, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V14, REG_V15, 64, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + /// + 
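
// Per-lane sketch of the operations in this area: Xor(a, b, c) above is the
// three-way exclusive OR a ^ b ^ c (EOR3), and XorRotateRight XORs two lanes
// and rotates the result right by a constant; for byte lanes the valid
// rotation is 1..8, matching the immediates in the emitIns tests.
static byte XorRotateRightByte(byte left, byte right, int count)
{
    int x = left ^ right;                              // EOR step
    return (byte)((x >> count) | (x << (8 - count)));  // rotate right within 8 bits
}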
/// svint64_t svxar[_n_s64](svint64_t op1, svint64_t op2, uint64_t imm3) + /// XAR Ztied1.D, Ztied1.D, Zop2.D, #imm3 + /// XAR Ztied2.D, Ztied2.D, Zop1.D, #imm3 + /// MOVPRFX Zresult, Zop1; XAR Zresult.D, Zresult.D, Zop2.D, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_AW_2A XAR ., ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V2, REG_V3, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V4, REG_V5, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V6, REG_V7, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V8, REG_V9, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V10, REG_V11, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V12, REG_V13, 4, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V14, REG_V15, 64, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + /// + /// svuint8_t svxar[_n_u8](svuint8_t op1, svuint8_t op2, uint64_t imm3) + /// XAR Ztied1.B, Ztied1.B, Zop2.B, #imm3 + /// XAR Ztied2.B, Ztied2.B, Zop1.B, #imm3 + /// MOVPRFX Zresult, Zop1; XAR Zresult.B, Zresult.B, Zop2.B, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_AW_2A XAR ., ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V2, REG_V3, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V4, REG_V5, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V6, REG_V7, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V8, REG_V9, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V10, REG_V11, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V12, REG_V13, 4, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V14, REG_V15, 64, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + /// + /// svuint16_t svxar[_n_u16](svuint16_t op1, svuint16_t op2, uint64_t imm3) + /// XAR Ztied1.H, Ztied1.H, Zop2.H, #imm3 + /// XAR Ztied2.H, Ztied2.H, Zop1.H, #imm3 + /// MOVPRFX Zresult, Zop1; XAR Zresult.H, Zresult.H, Zop2.H, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_AW_2A XAR ., ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V2, REG_V3, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V4, REG_V5, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V6, REG_V7, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V8, REG_V9, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V10, REG_V11, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V12, REG_V13, 4, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, 
EA_SCALABLE, REG_V14, REG_V15, 64, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + /// + /// svuint32_t svxar[_n_u32](svuint32_t op1, svuint32_t op2, uint64_t imm3) + /// XAR Ztied1.S, Ztied1.S, Zop2.S, #imm3 + /// XAR Ztied2.S, Ztied2.S, Zop1.S, #imm3 + /// MOVPRFX Zresult, Zop1; XAR Zresult.S, Zresult.S, Zop2.S, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_AW_2A XAR ., ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V2, REG_V3, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V4, REG_V5, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V6, REG_V7, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V8, REG_V9, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V10, REG_V11, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V12, REG_V13, 4, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V14, REG_V15, 64, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + /// + /// svuint64_t svxar[_n_u64](svuint64_t op1, svuint64_t op2, uint64_t imm3) + /// XAR Ztied1.D, Ztied1.D, Zop2.D, #imm3 + /// XAR Ztied2.D, Ztied2.D, Zop1.D, #imm3 + /// MOVPRFX Zresult, Zop1; XAR Zresult.D, Zresult.D, Zop2.D, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_AW_2A XAR ., ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V0, REG_V1, 1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V2, REG_V3, 8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V4, REG_V5, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V6, REG_V7, 16, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V8, REG_V9, 3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V10, REG_V11, 32, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V12, REG_V13, 4, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_I(INS_sve_xar, EA_SCALABLE, REG_V14, REG_V15, 64, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count) => XorRotateRight(left, right, count); + + } +} + diff --git a/sve_api/out_helper_api/SveAes.cs b/sve_api/out_helper_api/SveAes.cs new file mode 100644 index 0000000000000..fd1abc004f67a --- /dev/null +++ b/sve_api/out_helper_api/SveAes.cs @@ -0,0 +1,115 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
+ +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveAes : AdvSimd + { + internal SveAes() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// AesInverseMixColumns : AES inverse mix columns + + /// + /// svuint8_t svaesimc[_u8](svuint8_t op) + /// AESIMC Ztied.B, Ztied.B + /// + /// codegenarm64test: + /// IF_SVE_GL_1A AESIMC .B, .B + /// theEmitter->emitIns_R(INS_sve_aesimc, EA_SCALABLE, REG_V0); + /// + public static unsafe Vector AesInverseMixColumns(Vector value) => AesInverseMixColumns(value); + + + /// AesMixColumns : AES mix columns + + /// + /// svuint8_t svaesmc[_u8](svuint8_t op) + /// AESMC Ztied.B, Ztied.B + /// + /// codegenarm64test: + /// IF_SVE_GL_1A AESMC .B, .B + /// theEmitter->emitIns_R(INS_sve_aesmc, EA_SCALABLE, REG_V5); + /// + public static unsafe Vector AesMixColumns(Vector value) => AesMixColumns(value); + + + /// AesSingleRoundDecryption : AES single round decryption + + /// + /// svuint8_t svaesd[_u8](svuint8_t op1, svuint8_t op2) + /// AESD Ztied1.B, Ztied1.B, Zop2.B + /// AESD Ztied2.B, Ztied2.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_GK_2A AESD .B, + /// theEmitter->emitIns_R_R(INS_sve_aesd, EA_SCALABLE, REG_V0, REG_V0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector AesSingleRoundDecryption(Vector left, Vector right) => AesSingleRoundDecryption(left, right); + + + /// AesSingleRoundEncryption : AES single round encryption + + /// + /// svuint8_t svaese[_u8](svuint8_t op1, svuint8_t op2) + /// AESE Ztied1.B, Ztied1.B, Zop2.B + /// AESE Ztied2.B, Ztied2.B, Zop1.B + /// + /// codegenarm64test: + /// IF_SVE_GK_2A AESE .B, + /// theEmitter->emitIns_R_R(INS_sve_aese, EA_SCALABLE, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector AesSingleRoundEncryption(Vector left, Vector right) => AesSingleRoundEncryption(left, right); + + + /// PolynomialMultiplyWideningLower : Polynomial multiply long (bottom) + + /// + /// svuint64_t svpmullb_pair[_u64](svuint64_t op1, svuint64_t op2) + /// PMULLB Zresult.Q, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_FN_3A PMULLB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_pmullb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_FN_3B PMULLB .Q, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_pmullb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q); + /// + public static unsafe Vector PolynomialMultiplyWideningLower(Vector left, Vector right) => PolynomialMultiplyWideningLower(left, right); + + + /// PolynomialMultiplyWideningUpper : Polynomial multiply long (top) + + /// + /// svuint64_t svpmullt_pair[_u64](svuint64_t op1, svuint64_t op2) + /// PMULLT Zresult.Q, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_FN_3A PMULLT ., ., . 
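
// Sketch of the carry-less ("polynomial") 64x64->128-bit multiply behind
// PMULLB/PMULLT: PMULLB multiplies the even-numbered 64-bit elements of each
// operand, PMULLT the odd-numbered ones. Modeled here on plain scalars.
static (ulong lo, ulong hi) CarrylessMultiply64(ulong a, ulong b)
{
    ulong lo = 0, hi = 0;
    for (int i = 0; i < 64; i++)
    {
        if (((b >> i) & 1) != 0)
        {
            lo ^= a << i;                        // low 64 bits of (a << i)
            if (i != 0) hi ^= a >> (64 - i);     // bits shifted past bit 63
        }
    }
    return (lo, hi);
}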
+ /// theEmitter->emitIns_R_R_R(INS_sve_pmullt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_FN_3B PMULLT .Q, .D, .D + /// theEmitter->emitIns_R_R_R(INS_sve_pmullt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q); + /// + public static unsafe Vector PolynomialMultiplyWideningUpper(Vector left, Vector right) => PolynomialMultiplyWideningUpper(left, right); + + } +} + diff --git a/sve_api/out_helper_api/SveBf16.cs b/sve_api/out_helper_api/SveBf16.cs new file mode 100644 index 0000000000000..cd3dd48d64d3c --- /dev/null +++ b/sve_api/out_helper_api/SveBf16.cs @@ -0,0 +1,1152 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveBf16 : AdvSimd + { + internal SveBf16() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// Bfloat16DotProduct : BFloat16 dot product + + /// + /// svfloat32_t svbfdot[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) + /// BFDOT Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; BFDOT Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_GY_3B BFDOT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_bfdot, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_bfdot, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_HA_3A BFDOT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_bfdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector Bfloat16DotProduct(Vector addend, Vector left, Vector right) => Bfloat16DotProduct(addend, left, right); + + + /// Bfloat16MatrixMultiplyAccumulate : BFloat16 matrix multiply-accumulate + + /// + /// svfloat32_t svbfmmla[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) + /// BFMMLA Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; BFMMLA Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_HD_3A BFMMLA .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_bfmmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector Bfloat16MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3) => Bfloat16MatrixMultiplyAccumulate(op1, op2, op3); + + + /// Bfloat16MultiplyAddWideningToSinglePrecisionLower : BFloat16 multiply-add long to single-precision (bottom) + + /// + /// svfloat32_t svbfmlalb[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) + /// BFMLALB Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; BFMLALB Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_GZ_3A BFMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_bfmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// IF_SVE_HB_3A BFMLALB .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_bfmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionLower(Vector op1, Vector op2, 
Vector op3) => Bfloat16MultiplyAddWideningToSinglePrecisionLower(op1, op2, op3); + + /// + /// svfloat32_t svbfmlalb_lane[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3, uint64_t imm_index) + /// BFMLALB Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; BFMLALB Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_GZ_3A BFMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_bfmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, INS_OPTS_SCALABLE_H); + /// IF_SVE_HB_3A BFMLALB .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_bfmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => Bfloat16MultiplyAddWideningToSinglePrecisionLower(op1, op2, op3, imm_index); + + + /// Bfloat16MultiplyAddWideningToSinglePrecisionUpper : BFloat16 multiply-add long to single-precision (top) + + /// + /// svfloat32_t svbfmlalt[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3) + /// BFMLALT Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; BFMLALT Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_GZ_3A BFMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_bfmlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_HB_3A BFMLALT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_bfmlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionUpper(Vector op1, Vector op2, Vector op3) => Bfloat16MultiplyAddWideningToSinglePrecisionUpper(op1, op2, op3); + + /// + /// svfloat32_t svbfmlalt_lane[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3, uint64_t imm_index) + /// BFMLALT Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; BFMLALT Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_GZ_3A BFMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_bfmlalt, EA_SCALABLE, REG_V2, REG_V3, REG_V1, 1, INS_OPTS_SCALABLE_H); + /// IF_SVE_HB_3A BFMLALT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_bfmlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector Bfloat16MultiplyAddWideningToSinglePrecisionUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => Bfloat16MultiplyAddWideningToSinglePrecisionUpper(op1, op2, op3, imm_index); + + + /// ConcatenateEvenInt128FromTwoInputs : Concatenate even quadwords from two inputs + + /// + /// svbfloat16_t svuzp1q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// UZP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . 
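
// Sketch of UZP1/UZP2 at 128-bit granularity: view each input as a sequence
// of 128-bit granules, concatenate the two inputs, then keep the even-
// (UZP1) or odd-numbered (UZP2) granules. T stands in for a hypothetical
// 128-bit granule type.
static T[] UnzipQ<T>(T[] left, T[] right, bool odd)
{
    var concat = new T[left.Length + right.Length];
    left.CopyTo(concat, 0);
    right.CopyTo(concat, left.Length);
    var result = new T[left.Length];
    for (int i = 0; i < result.Length; i++)
        result[i] = concat[2 * i + (odd ? 1 : 0)];
    return result;
}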
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + + /// ConcatenateOddInt128FromTwoInputs : Concatenate odd quadwords from two inputs + + /// + /// svbfloat16_t svuzp2q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// UZP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) => ConcatenateOddInt128FromTwoInputs(left, right); + + + /// ConditionalExtractAfterLastActiveElement : Conditionally extract element after last + + /// + /// svbfloat16_t svclasta[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// CLASTA Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data); + + /// + /// svbfloat16_t svclasta[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// CLASTA Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H + /// bfloat16_t svclasta[_n_bf16](svbool_t pg, bfloat16_t fallback, svbfloat16_t data) + /// CLASTA Wtied, Pg, Wtied, Zdata.H + /// CLASTA Htied, Pg, Htied, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . 
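
// Sketch of the CLASTA/CLASTB semantics (as understood from the
// architectural pseudocode): CLASTB yields the last active element, CLASTA
// the element after it with wraparound to lane 0; when no lane is active,
// the explicit fallback is returned.
static T ConditionalExtract<T>(bool[] mask, T fallback, T[] data, bool after)
{
    int last = -1;
    for (int i = 0; i < mask.Length; i++)
        if (mask[i]) last = i;                          // last active lane
    if (last < 0) return fallback;                      // no active lanes
    return after ? data[(last + 1) % data.Length] : data[last];
}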
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe bfloat16 ConditionalExtractAfterLastActiveElement(Vector mask, bfloat16 defaultValues, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data); + + + /// ConditionalExtractAfterLastActiveElementAndReplicate : Conditionally extract element after last + + /// + /// svbfloat16_t svclasta[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// CLASTA Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, Vector defaultScalar, Vector data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data); + + + /// ConditionalExtractLastActiveElement : Conditionally extract last element + + /// + /// svbfloat16_t svclastb[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// CLASTB Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTB ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D); + /// IF_SVE_CN_3A CLASTB , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTB , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConditionalExtractLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractLastActiveElement(mask, defaultValue, data); + + /// + /// svbfloat16_t svclastb[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// CLASTB Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H + /// bfloat16_t svclastb[_n_bf16](svbool_t pg, bfloat16_t fallback, svbfloat16_t data) + /// CLASTB Wtied, Pg, Wtied, Zdata.H + /// CLASTB Htied, Pg, Htied, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTB ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D); + /// IF_SVE_CN_3A CLASTB , , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTB , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D); + /// + public static unsafe bfloat16 ConditionalExtractLastActiveElement(Vector mask, bfloat16 defaultValues, Vector data) => ConditionalExtractLastActiveElement(mask, defaultValues, data); + + + /// ConditionalExtractLastActiveElementAndReplicate : Conditionally extract last element + + /// + /// svbfloat16_t svclastb[_bf16](svbool_t pg, svbfloat16_t fallback, svbfloat16_t data) + /// CLASTB Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTB ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D); + /// IF_SVE_CN_3A CLASTB , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTB , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConditionalExtractLastActiveElementAndReplicate(Vector mask, Vector fallback, Vector data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data); + + + /// ConditionalSelect : Conditionally select elements + + /// + /// svbfloat16_t svsel[_bf16](svbool_t pg, svbfloat16_t op1, svbfloat16_t op2) + /// SEL Zresult.H, Pg, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_P4, REG_P6, REG_P13, REG_P10, INS_OPTS_SCALABLE_B); /* SEL .B, , .B, .B */ + /// IF_SVE_CW_4A SEL ., , ., . 
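
// The SEL being documented here reduces to a per-lane ternary; a scalar sketch:
static T[] ConditionalSelectModel<T>(bool[] mask, T[] left, T[] right)
{
    var result = new T[mask.Length];
    for (int i = 0; i < mask.Length; i++)
        result[i] = mask[i] ? left[i] : right[i];   // active lane: left, else right
    return result;
}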
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V29, REG_P15, REG_V28, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V5, REG_P13, REG_V27, REG_V5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right) => ConditionalSelect(mask, left, right); + + + /// ConvertToBFloat16 : Floating-point convert + + /// + /// svbfloat16_t svcvt_bf16[_f32]_m(svbfloat16_t inactive, svbool_t pg, svfloat32_t op) + /// BFCVT Ztied.H, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; BFCVT Zresult.H, Pg/M, Zop.S + /// svbfloat16_t svcvt_bf16[_f32]_x(svbool_t pg, svfloat32_t op) + /// BFCVT Ztied.H, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; BFCVT Zresult.H, Pg/M, Zop.S + /// svbfloat16_t svcvt_bf16[_f32]_z(svbool_t pg, svfloat32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; BFCVT Zresult.H, Pg/M, Zop.S + /// + /// codegenarm64test: + /// sve_bfcvt - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToBFloat16(Vector value) => ConvertToBFloat16(value); + + + + /// CreateFalseMaskBFloat16 : Set all predicate elements to false + + /// + /// svbool_t svpfalse[_b]() + /// PFALSE Presult.B + /// + /// codegenarm64test: + /// IF_SVE_DJ_1A PFALSE .B + /// theEmitter->emitIns_R(INS_sve_pfalse, EA_SCALABLE, REG_P13); + /// + public static unsafe Vector CreateFalseMaskBFloat16() => CreateFalseMaskBFloat16(); + + + /// CreateTrueMaskBFloat16 : Set predicate elements to true + + /// + /// svbool_t svptrue_pat_b8(enum svpattern pattern) + /// PTRUE Presult.B, pattern + /// + /// codegenarm64test: + /// IF_SVE_DE_1A PTRUE .{, } + /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P0, INS_OPTS_SCALABLE_B, SVE_PATTERN_POW2); + /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P7, INS_OPTS_SCALABLE_H, SVE_PATTERN_MUL3); + /// IF_SVE_DZ_1A PTRUE . 
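
// Sketch of how a PTRUE pattern limits the active lane count, given the
// number of lanes available at the chosen element size (pattern semantics
// assumed here from the SVE specification):
static int ActiveLaneCount(string pattern, int available)
{
    switch (pattern)
    {
        case "ALL":  return available;                     // every lane active
        case "POW2":                                       // largest power of two that fits
            int p = 1;
            while (p * 2 <= available) p *= 2;
            return p;
        case "MUL3": return available - (available % 3);   // largest multiple of 3
        case "MUL4": return available - (available % 4);   // largest multiple of 4
        default:     return 0;                             // VL<n>: n when n <= available, else 0
    }
}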
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P10, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateTrueMaskBFloat16([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskBFloat16(pattern); + + + /// CreateWhileReadAfterWriteMask : While free of read-after-write conflicts + + /// + /// svbool_t svwhilerw[_bf16](const bfloat16_t *op1, const bfloat16_t *op2) + /// WHILERW Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILERW ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(bfloat16* left, bfloat16* right) => CreateWhileReadAfterWriteMask(left, right); + + + /// CreateWhileWriteAfterReadMask : While free of write-after-read conflicts + + /// + /// svbool_t svwhilewr[_bf16](const bfloat16_t *op1, const bfloat16_t *op2) + /// WHILEWR Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILEWR ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(bfloat16* left, bfloat16* right) => CreateWhileWriteAfterReadMask(left, right); + + + /// DotProductBySelectedScalar : BFloat16 dot product + + /// + /// svfloat32_t svbfdot_lane[_f32](svfloat32_t op1, svbfloat16_t op2, svbfloat16_t op3, uint64_t imm_index) + /// BFDOT Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; BFDOT Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_GY_3B BFDOT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_bfdot, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 2, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_bfdot, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 3, INS_OPTS_SCALABLE_H); + /// IF_SVE_HA_3A BFDOT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_bfdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => DotProductBySelectedScalar(addend, left, right, rightIndex); + + + /// DownConvertNarrowingUpper : Down convert and narrow (top) + + /// + /// svbfloat16_t svcvtnt_bf16[_f32]_m(svbfloat16_t even, svbool_t pg, svfloat32_t op) + /// BFCVTNT Ztied.H, Pg/M, Zop.S + /// svbfloat16_t svcvtnt_bf16[_f32]_x(svbfloat16_t even, svbool_t pg, svfloat32_t op) + /// BFCVTNT Ztied.H, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_GQ_3A BFCVTNT .H, /M, .S + /// theEmitter->emitIns_R_R_R(INS_sve_bfcvtnt, 
EA_SCALABLE, REG_V3, REG_P0, REG_V4); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector DownConvertNarrowingUpper(Vector value) => DownConvertNarrowingUpper(value); + + + /// DuplicateSelectedScalarToVector : Broadcast a scalar value + + /// + /// svbfloat16_t svdup_lane[_bf16](svbfloat16_t data, uint16_t index) + /// DUP Zresult.H, Zdata.H[index] + /// TBL Zresult.H, Zdata.H, Zindex.H + /// svbfloat16_t svdupq_lane[_bf16](svbfloat16_t data, uint64_t index) + /// DUP Zresult.Q, Zdata.Q[index] + /// TBL Zresult.D, Zdata.D, Zindices_d.D + /// + /// codegenarm64test: + /// IF_SVE_EB_1A DUP ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// IF_SVE_CB_2A DUP ., + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V1, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_8BYTE, REG_V4, REG_SP, INS_OPTS_SCALABLE_D); + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + + /// ExtractAfterLastScalar : Extract element after last + + /// + /// bfloat16_t svlasta[_bf16](svbool_t pg, svbfloat16_t op) + /// LASTA Wresult, Pg, Zop.H + /// LASTA Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe bfloat16 ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + + /// ExtractAfterLastVector : Extract element after last + + /// + /// bfloat16_t svlasta[_bf16](svbool_t pg, svbfloat16_t op) + /// LASTA Wresult, Pg, Zop.H + /// LASTA Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + + /// ExtractLastScalar : Extract last element + + /// + /// bfloat16_t svlastb[_bf16](svbool_t pg, svbfloat16_t op) + /// LASTB Wresult, Pg, Zop.H + /// LASTB Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe bfloat16 ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + + /// ExtractLastVector : Extract last element + + /// + /// bfloat16_t svlastb[_bf16](svbool_t pg, svbfloat16_t op) + /// LASTB Wresult, Pg, Zop.H + /// LASTB Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + + /// ExtractVector : Extract vector from pair of vectors + + /// + /// svbfloat16_t svext[_bf16](svbfloat16_t op1, svbfloat16_t op2, uint64_t imm3) + /// EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 2 + /// MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 2 + /// + /// codegenarm64test: + /// sve_ext - not implemented in coreclr + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index); + + + /// GetActiveElementCount : Count set predicate bits + + /// + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) + /// CNTP Xresult, Pg, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_DK_3A CNTP , , . + /// theEmitter->emitIns_R_R_R(INS_sve_cntp, EA_8BYTE, REG_R29, REG_P0, REG_P15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DL_2A CNTP , ., + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) => GetActiveElementCount(mask, from); + + + /// InsertIntoShiftedVector : Insert scalar into shifted vector + + /// + /// svbfloat16_t svinsr[_n_bf16](svbfloat16_t op1, bfloat16_t op2) + /// INSR Ztied1.H, Wop2 + /// INSR Ztied1.H, Hop2 + /// + /// codegenarm64test: + /// sve_insr - not implemented in coreclr + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, bfloat16 right) => InsertIntoShiftedVector(left, right); + + + /// InterleaveEvenInt128FromTwoInputs : Interleave even quadwords from two inputs + + /// + /// svbfloat16_t svtrn1q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// TRN1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) => InterleaveEvenInt128FromTwoInputs(left, right); + + + /// InterleaveInt128FromHighHalvesOfTwoInputs : Interleave quadwords from high halves of two inputs + + /// + /// svbfloat16_t svzip2q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// ZIP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right); + + + /// InterleaveInt128FromLowHalvesOfTwoInputs : Interleave quadwords from low halves of two inputs + + /// + /// svbfloat16_t svzip1q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// ZIP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right); + + + /// InterleaveOddInt128FromTwoInputs : Interleave odd quadwords from two inputs + + /// + /// svbfloat16_t svtrn2q[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// TRN2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . 
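
// Sketch of ZIP1/ZIP2 at 128-bit granularity: interleave the 128-bit
// granules taken from the low (ZIP1) or high (ZIP2) halves of the two
// inputs; the TRN1/TRN2 forms above instead pair up even- or odd-numbered
// granules in place.
static T[] ZipQ<T>(T[] left, T[] right, bool highHalves)
{
    int half = left.Length / 2;
    int start = highHalves ? half : 0;                 // ZIP2 reads the upper halves
    var result = new T[left.Length];
    for (int i = 0; i < half; i++)
    {
        result[2 * i]     = left[start + i];
        result[2 * i + 1] = right[start + i];
    }
    return result;
}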
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) => InterleaveOddInt128FromTwoInputs(left, right); + + + /// LoadVector : Unextended load + + /// + /// svbfloat16_t svld1[_bf16](svbool_t pg, const bfloat16_t *base) + /// LD1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_G LD1H {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1H {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1H {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1H {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1H {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1H {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1H {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_I LD1H {.H }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1H {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVector(Vector mask, bfloat16* address) => LoadVector(mask, address); + + + /// LoadVector128AndReplicateToVector : Load and replicate 128 bits of data + + /// + /// svbfloat16_t svld1rq[_bf16](svbool_t pg, const bfloat16_t *base) + /// LD1RQH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD1RQH 
Zresult.H, Pg/Z, [Xarray, #index * 2] + /// LD1RQH Zresult.H, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1RQH {.H }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rqh, EA_SCALABLE, REG_V4, REG_P5, REG_R6, 112, INS_OPTS_SCALABLE_H); + /// IF_SVE_IP_4A LD1RQH {.H }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rqh, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, bfloat16* address) => LoadVector128AndReplicateToVector(mask, address); + + + /// LoadVector256AndReplicateToVector : Load and replicate 256 bits of data + + /// + /// svbfloat16_t svld1ro[_bf16](svbool_t pg, const bfloat16_t *base) + /// LD1ROH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD1ROH Zresult.H, Pg/Z, [Xarray, #index * 2] + /// LD1ROH Zresult.H, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1ROH {.H }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1roh, EA_SCALABLE, REG_V8, REG_P3, REG_R1, -256, INS_OPTS_SCALABLE_H); + /// IF_SVE_IP_4A LD1ROH {.H }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1roh, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_R1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, bfloat16* address) => LoadVector256AndReplicateToVector(mask, address); + + + /// LoadVectorFirstFaulting : Unextended load, first-faulting + + /// + /// svbfloat16_t svldff1[_bf16](svbool_t pg, const bfloat16_t *base) + /// LDFF1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LDFF1H Zresult.H, Pg/Z, [Xbase, XZR, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1H {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1H {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1H {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1H {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1H {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V0, REG_P2, REG_R6, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1H {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_G LDFF1H {.H }, /Z, [{, , LSL #1}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_H, 
INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1H {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, bfloat16* address) => LoadVectorFirstFaulting(mask, address); + + + /// LoadVectorNonFaulting : Unextended load, non-faulting + + /// + /// svbfloat16_t svldnf1[_bf16](svbool_t pg, const bfloat16_t *base) + /// LDNF1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IL_3A_B LDNF1H {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LoadVectorNonFaulting(bfloat16* address) => LoadVectorNonFaulting(address); + + + /// LoadVectorNonTemporal : Unextended load, non-temporal + + /// + /// svbfloat16_t svldnt1[_bf16](svbool_t pg, const bfloat16_t *base) + /// LDNT1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LDNT1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1H {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1h, EA_SCALABLE, REG_V6, REG_P7, REG_R8, 0, INS_OPTS_SCALABLE_H); + /// IF_SVE_IF_4A LDNT1H {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_V2, REG_R3, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1H {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V1, REG_P4, REG_V3, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1H {.H }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVectorNonTemporal(Vector mask, bfloat16* address) => LoadVectorNonTemporal(mask, address); + + + /// LoadVectorx2 : Load two-element tuples into two vectors + + /// + /// svbfloat16x2_t svld2[_bf16](svbool_t pg, const bfloat16_t *base) + /// LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD2H {.H, .H }, /Z, [{, #, MUL + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld2h, EA_SCALABLE, REG_V6, REG_P5, REG_R4, 8, INS_OPTS_SCALABLE_H); + /// IF_SVE_IT_4A LD2H {.H, .H }, /Z, [, , LSL + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld2h, EA_SCALABLE, REG_V8, REG_P5, REG_R9, REG_R10, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, bfloat16* address) => LoadVectorx2(mask, address); + + + /// LoadVectorx3 : Load three-element tuples into 
three vectors + + /// + /// svbfloat16x3_t svld3[_bf16](svbool_t pg, const bfloat16_t *base) + /// LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD3H {.H, .H, .H }, /Z, [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld3h, EA_SCALABLE, REG_V0, REG_P0, REG_R0, 21, INS_OPTS_SCALABLE_H); + /// IF_SVE_IT_4A LD3H {.H, .H, .H }, /Z, [, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld3h, EA_SCALABLE, REG_V30, REG_P2, REG_R9, REG_R4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, bfloat16* address) => LoadVectorx3(mask, address); + + + /// LoadVectorx4 : Load four-element tuples into four vectors + + /// + /// svbfloat16x4_t svld4[_bf16](svbool_t pg, const bfloat16_t *base) + /// LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD4H {.H, .H, .H, .H }, /Z, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld4h, EA_SCALABLE, REG_V5, REG_P4, REG_R3, -32, INS_OPTS_SCALABLE_H); + /// IF_SVE_IT_4A LD4H {.H, .H, .H, .H }, /Z, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld4h, EA_SCALABLE, REG_V13, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, bfloat16* address) => LoadVectorx4(mask, address); + + + /// PopCount : Count nonzero bits + + /// + /// svuint16_t svcnt[_bf16]_m(svuint16_t inactive, svbool_t pg, svbfloat16_t op) + /// CNT Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; CNT Zresult.H, Pg/M, Zop.H + /// svuint16_t svcnt[_bf16]_x(svbool_t pg, svbfloat16_t op) + /// CNT Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; CNT Zresult.H, Pg/M, Zop.H + /// svuint16_t svcnt[_bf16]_z(svbool_t pg, svbfloat16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; CNT Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnt, EA_SCALABLE, REG_V28, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + + /// ReverseElement : Reverse all elements + + /// + /// svbfloat16_t svrev[_bf16](svbfloat16_t op) + /// REV Zresult.H, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CJ_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P1, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P4, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P3, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P0, REG_P6, INS_OPTS_SCALABLE_D); + /// IF_SVE_CG_2A REV ., . 
+ /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V7, REG_V1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + + /// Splice : Splice two vectors under predicate control + + /// + /// svbfloat16_t svsplice[_bf16](svbool_t pg, svbfloat16_t op1, svbfloat16_t op2) + /// SPLICE Ztied1.H, Pg, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SPLICE Zresult.H, Pg, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_CV_3A SPLICE ., , {., .} + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V0, REG_P0, REG_V30, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V3, REG_P7, REG_V27, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// IF_SVE_CV_3B SPLICE ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V1, REG_P1, REG_V29, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V2, REG_P6, REG_V28, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + + /// Store : Non-truncating store + + /// + /// void svst1[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16_t data) + /// ST1H Zdata.H, Pg, [Xarray, Xindex, LSL #1] + /// ST1H Zdata.H, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1H {.}, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P6, REG_R1, REG_R2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1H {.S }, , [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1H {.D }, , [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1H {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JJ_4A_D ST1H {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1H 
{<Zt>.<T>}, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 3, INS_OPTS_SCALABLE_H);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 1, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, -2, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JJ_4B ST1H {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+ /// IF_SVE_JJ_4B_E ST1H {<Zt>.D }, <Pg>, [<Xn|SP>, <Zm>.D]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JI_3A_A ST1H {<Zt>.S }, <Pg>, [<Zn>.S{, #<imm>}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_D);
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe void Store(Vector<bfloat16> mask, bfloat16* address, Vector<bfloat16> data) => Store(mask, address, data);
+
+ /// <summary>
+ /// void svst2[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16x2_t data)
+ /// ST2H {Zdata0.H, Zdata1.H}, Pg, [Xarray, Xindex, LSL #1]
+ /// ST2H {Zdata0.H, Zdata1.H}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST2H {<Zt1>.H, <Zt2>.H }, <Pg>, [<Xn|SP>{, #<imm>, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st2h, EA_SCALABLE, REG_V6, REG_P7, REG_R8, -16, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_JC_4A ST2H {<Zt1>.H, <Zt2>.H }, <Pg>, [<Xn|SP>, <Xm>, LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st2h, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_R6, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe void Store(Vector<bfloat16> mask, bfloat16* address, (Vector<bfloat16> Value1, Vector<bfloat16> Value2) data) => Store(mask, address, data);
+
+ /// <summary>
+ /// void svst3[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16x3_t data)
+ /// ST3H {Zdata0.H - Zdata2.H}, Pg, [Xarray, Xindex, LSL #1]
+ /// ST3H {Zdata0.H - Zdata2.H}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST3H {<Zt1>.H, <Zt2>.H, <Zt3>.H }, <Pg>, [<Xn|SP>{, #<imm>,
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st3h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -24, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_JC_4A ST3H {<Zt1>.H, <Zt2>.H, <Zt3>.H }, <Pg>, [<Xn|SP>, <Xm>,
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st3h, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe void Store(Vector<bfloat16> mask, bfloat16* address, (Vector<bfloat16> Value1, Vector<bfloat16> Value2, Vector<bfloat16> Value3) data) => Store(mask, address, data);
+
+ /// <summary>
+ /// void svst4[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16x4_t data)
+ /// ST4H {Zdata0.H - Zdata3.H}, Pg, [Xarray, Xindex, LSL #1]
+ /// ST4H {Zdata0.H - Zdata3.H}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST4H {<Zt1>.H, <Zt2>.H, <Zt3>.H, <Zt4>.H }, <Pg>, [<Xn|SP>{,
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st4h, EA_SCALABLE, REG_V3, REG_P5, REG_R2, -32, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_JC_4A ST4H {<Zt1>.H, <Zt2>.H, <Zt3>.H, <Zt4>.H }, <Pg>,
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st4h, EA_SCALABLE, REG_V1, REG_P0, REG_R9, REG_R8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ /// </summary>
+ public static unsafe void Store(Vector<bfloat16> mask, bfloat16* address, (Vector<bfloat16> Value1, Vector<bfloat16> Value2, Vector<bfloat16> Value3, Vector<bfloat16> Value4) data) => Store(mask, address, data);
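+
+ // Usage sketch (illustrative, not generator output; `mask`, `dest`, `v0` and `v1`
+ // are hypothetical locals): the tuple overloads map to the structured stores
+ // ST2H/ST3H/ST4H, which write the tuple elements to memory interleaved.
+ //
+ //   if (IsSupported)
+ //   {
+ //       Store(mask, dest, v0);        // ST1H: contiguous store of one vector
+ //       Store(mask, dest, (v0, v1));  // ST2H: two vectors, elements interleaved
+ //   }
+
+
+ /// StoreNonTemporal :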
Non-truncating store, non-temporal + + /// + /// void svstnt1[_bf16](svbool_t pg, bfloat16_t *base, svbfloat16_t data) + /// STNT1H Zdata.H, Pg, [Xarray, Xindex, LSL #1] + /// STNT1H Zdata.H, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H); + /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S); + /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe void StoreNonTemporal(Vector mask, bfloat16* address, Vector data) => StoreNonTemporal(mask, address, data); + + + /// TransposeEven : Interleave even elements from two inputs + + /// + /// svbfloat16_t svtrn1[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// TRN1 Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector TransposeEven(Vector left, Vector right) => TransposeEven(left, right); + + + /// TransposeOdd : Interleave odd elements from two inputs + + /// + /// svbfloat16_t svtrn2[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// TRN2 Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector TransposeOdd(Vector left, Vector right) => TransposeOdd(left, right); + + + /// UnzipEven : Concatenate even elements from two inputs + + /// + /// svbfloat16_t svuzp1[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// UZP1 Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) => UnzipEven(left, right); + + + /// UnzipOdd : Concatenate odd elements from two inputs + + /// + /// svbfloat16_t svuzp2[_bf16](svbfloat16_t op1, svbfloat16_t op2) + /// UZP2 Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) => UnzipOdd(left, right); + + + /// VectorTableLookup : Table lookup in single-vector table + + /// + /// svbfloat16_t svtbl[_bf16](svbfloat16_t data, svuint16_t indices) + /// TBL Zresult.H, Zdata.H, Zindices.H + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) => VectorTableLookup(data, indices); + + /// + /// svbfloat16_t svtbl2[_bf16](svbfloat16x2_t data, svuint16_t indices) + /// TBL Zresult.H, {Zdata0.H, Zdata1.H}, Zindices.H + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// </summary>
+ public static unsafe Vector<bfloat16> VectorTableLookup((Vector<bfloat16> data1, Vector<bfloat16> data2) table, Vector<ushort> indices) => VectorTableLookup(table, indices);
+
+
+ /// VectorTableLookupExtension : Table lookup in single-vector table (merging)
+
+ /// <summary>
+ /// svbfloat16_t svtbx[_bf16](svbfloat16_t fallback, svbfloat16_t data, svuint16_t indices)
+ /// TBX Ztied.H, Zdata.H, Zindices.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BZ_3A TBX <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<bfloat16> VectorTableLookupExtension(Vector<bfloat16> fallback, Vector<bfloat16> data, Vector<ushort> indices) => VectorTableLookupExtension(fallback, data, indices);
+
+
+ /// ZipHigh : Interleave elements from high halves of two inputs
+
+ /// <summary>
+ /// svbfloat16_t svzip2[_bf16](svbfloat16_t op1, svbfloat16_t op2)
+ /// ZIP2 Zresult.H, Zop1.H, Zop2.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BR_3A ZIP2 <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_BR_3B ZIP2 <Zd>.Q, <Zn>.Q, <Zm>.Q
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_CI_3A ZIP2 <Pd>.<T>, <Pn>.<T>, <Pm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H);
+ /// </summary>
+ public static unsafe Vector<bfloat16> ZipHigh(Vector<bfloat16> left, Vector<bfloat16> right) => ZipHigh(left, right);
+
+
+ /// ZipLow : Interleave elements from low halves of two inputs
+
+ /// <summary>
+ /// svbfloat16_t svzip1[_bf16](svbfloat16_t op1, svbfloat16_t op2)
+ /// ZIP1 Zresult.H, Zop1.H, Zop2.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BR_3A ZIP1 <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_BR_3B ZIP1 <Zd>.Q, <Zn>.Q, <Zm>.Q
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_CI_3A ZIP1 <Pd>.<T>, <Pn>.<T>, <Pm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<bfloat16> ZipLow(Vector<bfloat16> left, Vector<bfloat16> right) => ZipLow(left, right);
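+
+ // Usage sketch (illustrative, not generator output; `a` and `b` are hypothetical
+ // Vector<bfloat16> locals): zip followed by unzip is a round trip, because
+ // UnzipEven/UnzipOdd (UZP1/UZP2) pick out exactly the even and odd positions
+ // that ZipLow/ZipHigh (ZIP1/ZIP2) interleaved.
+ //
+ //   Vector<bfloat16> lo = ZipLow(a, b);          // a0, b0, a1, b1, ...
+ //   Vector<bfloat16> hi = ZipHigh(a, b);         // same, for the upper halves
+ //   Vector<bfloat16> evens = UnzipEven(lo, hi);  // recovers a
+ //   Vector<bfloat16> odds = UnzipOdd(lo, hi);    // recovers b
+
+ }
+}
+
diff --git a/sve_api/out_helper_api/SveBitperm.cs b/sve_api/out_helper_api/SveBitperm.cs
new file mode 100644
index 0000000000000..37f8e53967068
--- /dev/null
+++ b/sve_api/out_helper_api/SveBitperm.cs
@@ -0,0 +1,164 @@
+// Licensed to the .NET Foundation under one or more agreements.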
+// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveBitperm : AdvSimd + { + internal SveBitperm() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// GatherLowerBitsFromPositionsSelectedByBitmask : Gather lower bits from positions selected by bitmask + + /// + /// svuint8_t svbext[_u8](svuint8_t op1, svuint8_t op2) + /// BEXT Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FQ_3A BEXT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bext, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right) => GatherLowerBitsFromPositionsSelectedByBitmask(left, right); + + /// + /// svuint16_t svbext[_u16](svuint16_t op1, svuint16_t op2) + /// BEXT Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FQ_3A BEXT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bext, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right) => GatherLowerBitsFromPositionsSelectedByBitmask(left, right); + + /// + /// svuint32_t svbext[_u32](svuint32_t op1, svuint32_t op2) + /// BEXT Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FQ_3A BEXT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bext, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right) => GatherLowerBitsFromPositionsSelectedByBitmask(left, right); + + /// + /// svuint64_t svbext[_u64](svuint64_t op1, svuint64_t op2) + /// BEXT Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_FQ_3A BEXT ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bext, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector GatherLowerBitsFromPositionsSelectedByBitmask(Vector left, Vector right) => GatherLowerBitsFromPositionsSelectedByBitmask(left, right); + + + /// GroupBitsToRightOrLeftAsSelectedByBitmask : Group bits to right or left as selected by bitmask + + /// + /// svuint8_t svbgrp[_u8](svuint8_t op1, svuint8_t op2) + /// BGRP Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FQ_3A BGRP ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bgrp, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_bgrp, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right) => GroupBitsToRightOrLeftAsSelectedByBitmask(left, right); + + /// + /// svuint16_t svbgrp[_u16](svuint16_t op1, svuint16_t op2) + /// BGRP Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FQ_3A BGRP ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_bgrp, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_bgrp, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right) => GroupBitsToRightOrLeftAsSelectedByBitmask(left, right); + + /// + /// svuint32_t svbgrp[_u32](svuint32_t op1, svuint32_t op2) + /// BGRP Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FQ_3A BGRP ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bgrp, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_bgrp, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right) => GroupBitsToRightOrLeftAsSelectedByBitmask(left, right); + + /// + /// svuint64_t svbgrp[_u64](svuint64_t op1, svuint64_t op2) + /// BGRP Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_FQ_3A BGRP ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bgrp, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_bgrp, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector GroupBitsToRightOrLeftAsSelectedByBitmask(Vector left, Vector right) => GroupBitsToRightOrLeftAsSelectedByBitmask(left, right); + + + /// ScatterLowerBitsIntoPositionsSelectedByBitmask : Scatter lower bits into positions selected by bitmask + + /// + /// svuint8_t svbdep[_u8](svuint8_t op1, svuint8_t op2) + /// BDEP Zresult.B, Zop1.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_FQ_3A BDEP ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bdep, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right) => ScatterLowerBitsIntoPositionsSelectedByBitmask(left, right); + + /// + /// svuint16_t svbdep[_u16](svuint16_t op1, svuint16_t op2) + /// BDEP Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_FQ_3A BDEP ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bdep, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right) => ScatterLowerBitsIntoPositionsSelectedByBitmask(left, right); + + /// + /// svuint32_t svbdep[_u32](svuint32_t op1, svuint32_t op2) + /// BDEP Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_FQ_3A BDEP ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_bdep, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector left, Vector right) => ScatterLowerBitsIntoPositionsSelectedByBitmask(left, right); + + /// + /// svuint64_t svbdep[_u64](svuint64_t op1, svuint64_t op2) + /// BDEP Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_FQ_3A BDEP ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_bdep, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B);
+ /// </summary>
+ public static unsafe Vector<ulong> ScatterLowerBitsIntoPositionsSelectedByBitmask(Vector<ulong> left, Vector<ulong> right) => ScatterLowerBitsIntoPositionsSelectedByBitmask(left, right);
+
+ }
+}
+
diff --git a/sve_api/out_helper_api/SveF32mm.cs b/sve_api/out_helper_api/SveF32mm.cs
new file mode 100644
index 0000000000000..eae31fd8b5eca
--- /dev/null
+++ b/sve_api/out_helper_api/SveF32mm.cs
@@ -0,0 +1,45 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics.CodeAnalysis;
+using System.Runtime.CompilerServices;
+using System.Runtime.Intrinsics;
+using System.Numerics;
+
+namespace System.Runtime.Intrinsics.Arm
+{
+ /// <summary>
+ /// This class provides access to the ARM SVE hardware instructions via intrinsics
+ /// </summary>
+ [Intrinsic]
+ [CLSCompliant(false)]
+ public abstract class SveF32mm : AdvSimd
+ {
+ internal SveF32mm() { }
+
+ public static new bool IsSupported { get => IsSupported; }
+
+ [Intrinsic]
+ public new abstract class Arm64 : AdvSimd.Arm64
+ {
+ internal Arm64() { }
+
+ public static new bool IsSupported { get => IsSupported; }
+ }
+
+ /// MatrixMultiplyAccumulate : Matrix multiply-accumulate
+
+ /// <summary>
+ /// svfloat32_t svmmla[_f32](svfloat32_t op1, svfloat32_t op2, svfloat32_t op3)
+ /// FMMLA Ztied1.S, Zop2.S, Zop3.S
+ /// MOVPRFX Zresult, Zop1; FMMLA Zresult.S, Zop2.S, Zop3.S
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_HD_3A_A FMMLA <Zda>.D, <Zn>.D, <Zm>.D
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D);
+ /// </summary>
+ public static unsafe Vector<float> MatrixMultiplyAccumulate(Vector<float> op1, Vector<float> op2, Vector<float> op3) => MatrixMultiplyAccumulate(op1, op2, op3);
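+
+ // Usage sketch (illustrative, not generator output; `acc`, `a` and `b` are
+ // hypothetical locals): FMMLA treats each 128-bit segment of its operands as a
+ // 2x2 matrix of single-precision floats and computes acc + a * b segment-wise.
+ //
+ //   Vector<float> acc = default;                // running 2x2 accumulators
+ //   acc = MatrixMultiplyAccumulate(acc, a, b);  // FMMLA Zacc.S, Za.S, Zb.S
+
+ }
+}
+
diff --git a/sve_api/out_helper_api/SveF64mm.cs b/sve_api/out_helper_api/SveF64mm.cs
new file mode 100644
index 0000000000000..791c6fe0fdfb1
--- /dev/null
+++ b/sve_api/out_helper_api/SveF64mm.cs
@@ -0,0 +1,1106 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics.CodeAnalysis;
+using System.Runtime.CompilerServices;
+using System.Runtime.Intrinsics;
+using System.Numerics;
+
+namespace System.Runtime.Intrinsics.Arm
+{
+ /// <summary>
+ /// This class provides access to the ARM SVE hardware instructions via intrinsics
+ /// </summary>
+ [Intrinsic]
+ [CLSCompliant(false)]
+ public abstract class SveF64mm : AdvSimd
+ {
+ internal SveF64mm() { }
+
+ public static new bool IsSupported { get => IsSupported; }
+
+ [Intrinsic]
+ public new abstract class Arm64 : AdvSimd.Arm64
+ {
+ internal Arm64() { }
+
+ public static new bool IsSupported { get => IsSupported; }
+ }
+
+ /// ConcatenateEvenInt128FromTwoInputs : Concatenate even quadwords from two inputs
+
+ /// <summary>
+ /// svint8_t svuzp1q[_s8](svint8_t op1, svint8_t op2)
+ /// UZP1 Zresult.Q, Zop1.Q, Zop2.Q
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BR_3A UZP1 <Zd>.<T>, <Zn>.<T>, <Zm>.<T>
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_BR_3B UZP1 <Zd>.Q, <Zn>.Q, <Zm>.Q
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_CI_3A UZP1 <Pd>.<T>, <Pn>.<T>, <Pm>.<T>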
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svint16_t svuzp1q[_s16](svint16_t op1, svint16_t op2) + /// UZP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svint32_t svuzp1q[_s32](svint32_t op1, svint32_t op2) + /// UZP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svint64_t svuzp1q[_s64](svint64_t op1, svint64_t op2) + /// UZP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svuint8_t svuzp1q[_u8](svuint8_t op1, svuint8_t op2) + /// UZP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svuint16_t svuzp1q[_u16](svuint16_t op1, svuint16_t op2) + /// UZP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svuint32_t svuzp1q[_u32](svuint32_t op1, svuint32_t op2) + /// UZP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svuint64_t svuzp1q[_u64](svuint64_t op1, svuint64_t op2) + /// UZP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svfloat32_t svuzp1q[_f32](svfloat32_t op1, svfloat32_t op2) + /// UZP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + /// + /// svfloat64_t svuzp1q[_f64](svfloat64_t op1, svfloat64_t op2) + /// UZP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + + /// ConcatenateOddInt128FromTwoInputs : Concatenate odd quadwords from two inputs + + /// + /// svint8_t svuzp2q[_s8](svint8_t op1, svint8_t op2) + /// UZP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) => ConcatenateOddInt128FromTwoInputs(left, right); + + /// + /// svint16_t svuzp2q[_s16](svint16_t op1, svint16_t op2) + /// UZP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) => ConcatenateOddInt128FromTwoInputs(left, right); + + /// + /// svint32_t svuzp2q[_s32](svint32_t op1, svint32_t op2) + /// UZP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) => ConcatenateOddInt128FromTwoInputs(left, right); + + /// + /// svint64_t svuzp2q[_s64](svint64_t op1, svint64_t op2) + /// UZP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) => ConcatenateOddInt128FromTwoInputs(left, right); + + /// + /// svuint8_t svuzp2q[_u8](svuint8_t op1, svuint8_t op2) + /// UZP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) => ConcatenateOddInt128FromTwoInputs(left, right); + + /// + /// svuint16_t svuzp2q[_u16](svuint16_t op1, svuint16_t op2) + /// UZP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) => ConcatenateOddInt128FromTwoInputs(left, right); + + /// + /// svuint32_t svuzp2q[_u32](svuint32_t op1, svuint32_t op2) + /// UZP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) => ConcatenateOddInt128FromTwoInputs(left, right); + + /// + /// svuint64_t svuzp2q[_u64](svuint64_t op1, svuint64_t op2) + /// UZP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) => ConcatenateOddInt128FromTwoInputs(left, right); + + /// + /// svfloat32_t svuzp2q[_f32](svfloat32_t op1, svfloat32_t op2) + /// UZP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) => ConcatenateOddInt128FromTwoInputs(left, right); + + /// + /// svfloat64_t svuzp2q[_f64](svfloat64_t op1, svfloat64_t op2) + /// UZP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) => ConcatenateOddInt128FromTwoInputs(left, right); + + + /// InterleaveEvenInt128FromTwoInputs : Interleave even quadwords from two inputs + + /// + /// svint8_t svtrn1q[_s8](svint8_t op1, svint8_t op2) + /// TRN1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) => InterleaveEvenInt128FromTwoInputs(left, right); + + /// + /// svint16_t svtrn1q[_s16](svint16_t op1, svint16_t op2) + /// TRN1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) => InterleaveEvenInt128FromTwoInputs(left, right); + + /// + /// svint32_t svtrn1q[_s32](svint32_t op1, svint32_t op2) + /// TRN1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) => InterleaveEvenInt128FromTwoInputs(left, right); + + /// + /// svint64_t svtrn1q[_s64](svint64_t op1, svint64_t op2) + /// TRN1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) => InterleaveEvenInt128FromTwoInputs(left, right); + + /// + /// svuint8_t svtrn1q[_u8](svuint8_t op1, svuint8_t op2) + /// TRN1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) => InterleaveEvenInt128FromTwoInputs(left, right); + + /// + /// svuint16_t svtrn1q[_u16](svuint16_t op1, svuint16_t op2) + /// TRN1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) => InterleaveEvenInt128FromTwoInputs(left, right); + + /// + /// svuint32_t svtrn1q[_u32](svuint32_t op1, svuint32_t op2) + /// TRN1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) => InterleaveEvenInt128FromTwoInputs(left, right); + + /// + /// svuint64_t svtrn1q[_u64](svuint64_t op1, svuint64_t op2) + /// TRN1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) => InterleaveEvenInt128FromTwoInputs(left, right); + + /// + /// svfloat32_t svtrn1q[_f32](svfloat32_t op1, svfloat32_t op2) + /// TRN1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) => InterleaveEvenInt128FromTwoInputs(left, right); + + /// + /// svfloat64_t svtrn1q[_f64](svfloat64_t op1, svfloat64_t op2) + /// TRN1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) => InterleaveEvenInt128FromTwoInputs(left, right); + + + /// InterleaveInt128FromHighHalvesOfTwoInputs : Interleave quadwords from high halves of two inputs + + /// + /// svint8_t svzip2q[_s8](svint8_t op1, svint8_t op2) + /// ZIP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right); + + /// + /// svint16_t svzip2q[_s16](svint16_t op1, svint16_t op2) + /// ZIP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right); + + /// + /// svint32_t svzip2q[_s32](svint32_t op1, svint32_t op2) + /// ZIP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . 
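
// Illustrative scalar model of the quadword-granule TRN1 behind the
// InterleaveEvenInt128FromTwoInputs overloads above (same conventions as the
// UZP2 sketch earlier: UInt128 per quadword, even quadword count assumed):
static UInt128[] InterleaveEvenQuadwords(UInt128[] op1, UInt128[] op2)
{
    int n = op1.Length;
    var result = new UInt128[n];
    for (int i = 0; i < n / 2; i++)
    {
        result[2 * i]     = op1[2 * i];      // even quadword i of op1...
        result[2 * i + 1] = op2[2 * i];      // ...paired with the same quadword of op2
    }
    return result;
}
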
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right); + + /// + /// svint64_t svzip2q[_s64](svint64_t op1, svint64_t op2) + /// ZIP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right); + + /// + /// svuint8_t svzip2q[_u8](svuint8_t op1, svuint8_t op2) + /// ZIP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right); + + /// + /// svuint16_t svzip2q[_u16](svuint16_t op1, svuint16_t op2) + /// ZIP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right); + + /// + /// svuint32_t svzip2q[_u32](svuint32_t op1, svuint32_t op2) + /// ZIP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right); + + /// + /// svuint64_t svzip2q[_u64](svuint64_t op1, svuint64_t op2) + /// ZIP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right); + + /// + /// svfloat32_t svzip2q[_f32](svfloat32_t op1, svfloat32_t op2) + /// ZIP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right); + + /// + /// svfloat64_t svzip2q[_f64](svfloat64_t op1, svfloat64_t op2) + /// ZIP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right); + + + /// InterleaveInt128FromLowHalvesOfTwoInputs : Interleave quadwords from low halves of two inputs + + /// + /// svint8_t svzip1q[_s8](svint8_t op1, svint8_t op2) + /// ZIP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right); + + /// + /// svint16_t svzip1q[_s16](svint16_t op1, svint16_t op2) + /// ZIP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right); + + /// + /// svint32_t svzip1q[_s32](svint32_t op1, svint32_t op2) + /// ZIP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . 
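
// Illustrative scalar model of the quadword-granule ZIP2 behind the
// InterleaveInt128FromHighHalvesOfTwoInputs overloads above; ZIP1 (next group) is
// identical except that it reads the low halves (index i instead of n/2 + i):
static UInt128[] InterleaveHighQuadwords(UInt128[] op1, UInt128[] op2)
{
    int n = op1.Length;
    var result = new UInt128[n];
    for (int i = 0; i < n / 2; i++)
    {
        result[2 * i]     = op1[n / 2 + i];  // quadword from the high half of op1
        result[2 * i + 1] = op2[n / 2 + i];  // interleaved with the high half of op2
    }
    return result;
}
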
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right); + + /// + /// svint64_t svzip1q[_s64](svint64_t op1, svint64_t op2) + /// ZIP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right); + + /// + /// svuint8_t svzip1q[_u8](svuint8_t op1, svuint8_t op2) + /// ZIP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right); + + /// + /// svuint16_t svzip1q[_u16](svuint16_t op1, svuint16_t op2) + /// ZIP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right); + + /// + /// svuint32_t svzip1q[_u32](svuint32_t op1, svuint32_t op2) + /// ZIP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right); + + /// + /// svuint64_t svzip1q[_u64](svuint64_t op1, svuint64_t op2) + /// ZIP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right); + + /// + /// svfloat32_t svzip1q[_f32](svfloat32_t op1, svfloat32_t op2) + /// ZIP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right); + + /// + /// svfloat64_t svzip1q[_f64](svfloat64_t op1, svfloat64_t op2) + /// ZIP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right); + + + /// InterleaveOddInt128FromTwoInputs : Interleave odd quadwords from two inputs + + /// + /// svint8_t svtrn2q[_s8](svint8_t op1, svint8_t op2) + /// TRN2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) => InterleaveOddInt128FromTwoInputs(left, right); + + /// + /// svint16_t svtrn2q[_s16](svint16_t op1, svint16_t op2) + /// TRN2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) => InterleaveOddInt128FromTwoInputs(left, right); + + /// + /// svint32_t svtrn2q[_s32](svint32_t op1, svint32_t op2) + /// TRN2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) => InterleaveOddInt128FromTwoInputs(left, right); + + /// + /// svint64_t svtrn2q[_s64](svint64_t op1, svint64_t op2) + /// TRN2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . 
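
// Worked example of the six quadword permutes at VL = 512 bits, i.e. four quadwords
// a0..a3 and b0..b3 per register; at VL = 256 bits (two quadwords) each ZIP/UZP/TRN
// pair collapses to the same result, e.g. ZIP1 == UZP1 == TRN1 == { a0, b0 }:
static void QuadwordPermuteDemo()
{
    string[] a = { "a0", "a1", "a2", "a3" }, b = { "b0", "b1", "b2", "b3" };
    string[] zip1 = { a[0], b[0], a[1], b[1] };  // low halves interleaved
    string[] zip2 = { a[2], b[2], a[3], b[3] };  // high halves interleaved
    string[] uzp1 = { a[0], a[2], b[0], b[2] };  // even quadwords concatenated
    string[] uzp2 = { a[1], a[3], b[1], b[3] };  // odd quadwords concatenated
    string[] trn1 = { a[0], b[0], a[2], b[2] };  // even-indexed pairs transposed
    string[] trn2 = { a[1], b[1], a[3], b[3] };  // odd-indexed pairs transposed
    Console.WriteLine(string.Join(' ', trn2));   // prints: a1 b1 a3 b3
}
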
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) => InterleaveOddInt128FromTwoInputs(left, right); + + /// + /// svuint8_t svtrn2q[_u8](svuint8_t op1, svuint8_t op2) + /// TRN2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) => InterleaveOddInt128FromTwoInputs(left, right); + + /// + /// svuint16_t svtrn2q[_u16](svuint16_t op1, svuint16_t op2) + /// TRN2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) => InterleaveOddInt128FromTwoInputs(left, right); + + /// + /// svuint32_t svtrn2q[_u32](svuint32_t op1, svuint32_t op2) + /// TRN2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) => InterleaveOddInt128FromTwoInputs(left, right); + + /// + /// svuint64_t svtrn2q[_u64](svuint64_t op1, svuint64_t op2) + /// TRN2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) => InterleaveOddInt128FromTwoInputs(left, right); + + /// + /// svfloat32_t svtrn2q[_f32](svfloat32_t op1, svfloat32_t op2) + /// TRN2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) => InterleaveOddInt128FromTwoInputs(left, right); + + /// + /// svfloat64_t svtrn2q[_f64](svfloat64_t op1, svfloat64_t op2) + /// TRN2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) => InterleaveOddInt128FromTwoInputs(left, right); + + + /// LoadVector256AndReplicateToVector : Load and replicate 256 bits of data + + /// + /// svint8_t svld1ro[_s8](svbool_t pg, const int8_t *base) + /// LD1ROB Zresult.B, Pg/Z, [Xarray, Xindex] + /// LD1ROB Zresult.B, Pg/Z, [Xarray, #index] + /// LD1ROB Zresult.B, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1ROB {.B }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rob, EA_SCALABLE, REG_V0, REG_P1, REG_R2, 0, INS_OPTS_SCALABLE_B); + /// IF_SVE_IP_4A LD1ROB {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rob, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_R2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, sbyte* address) => LoadVector256AndReplicateToVector(mask, address); + + /// + /// svint16_t svld1ro[_s16](svbool_t pg, const int16_t *base) + /// LD1ROH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD1ROH Zresult.H, Pg/Z, [Xarray, #index * 2] + /// LD1ROH Zresult.H, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1ROH {.H }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1roh, EA_SCALABLE, REG_V8, REG_P3, REG_R1, -256, INS_OPTS_SCALABLE_H); + /// IF_SVE_IP_4A LD1ROH {.H }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1roh, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_R1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, short* address) => LoadVector256AndReplicateToVector(mask, address); + + /// + /// svint32_t svld1ro[_s32](svbool_t pg, const int32_t *base) + /// LD1ROW Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2] + /// LD1ROW Zresult.S, Pg/Z, [Xarray, #index * 4] + /// LD1ROW Zresult.S, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1ROW {.S }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1row, EA_SCALABLE, REG_V3, REG_P4, REG_R0, 224, INS_OPTS_SCALABLE_S); + /// IF_SVE_IP_4A LD1ROW {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1row, EA_SCALABLE, REG_V1, REG_P3, REG_R2, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, int* address) => LoadVector256AndReplicateToVector(mask, address); + + /// + /// svint64_t svld1ro[_s64](svbool_t pg, const int64_t *base) + /// LD1ROD Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3] + /// LD1ROD Zresult.D, Pg/Z, [Xarray, #index * 8] + /// LD1ROD Zresult.D, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1ROD {.D }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rod, EA_SCALABLE, REG_V4, REG_P5, REG_R6, -32, INS_OPTS_SCALABLE_D); + /// IF_SVE_IP_4A LD1ROD {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rod, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, long* address) => LoadVector256AndReplicateToVector(mask, address); + + /// + /// svuint8_t svld1ro[_u8](svbool_t pg, const uint8_t *base) + /// LD1ROB Zresult.B, Pg/Z, [Xarray, Xindex] + /// LD1ROB Zresult.B, Pg/Z, [Xarray, #index] + /// LD1ROB Zresult.B, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A 
LD1ROB {.B }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rob, EA_SCALABLE, REG_V0, REG_P1, REG_R2, 0, INS_OPTS_SCALABLE_B); + /// IF_SVE_IP_4A LD1ROB {.B }, /Z, [, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rob, EA_SCALABLE, REG_V0, REG_P1, REG_R3, REG_R2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, byte* address) => LoadVector256AndReplicateToVector(mask, address); + + /// + /// svuint16_t svld1ro[_u16](svbool_t pg, const uint16_t *base) + /// LD1ROH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD1ROH Zresult.H, Pg/Z, [Xarray, #index * 2] + /// LD1ROH Zresult.H, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1ROH {.H }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1roh, EA_SCALABLE, REG_V8, REG_P3, REG_R1, -256, INS_OPTS_SCALABLE_H); + /// IF_SVE_IP_4A LD1ROH {.H }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1roh, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_R1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, ushort* address) => LoadVector256AndReplicateToVector(mask, address); + + /// + /// svuint32_t svld1ro[_u32](svbool_t pg, const uint32_t *base) + /// LD1ROW Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2] + /// LD1ROW Zresult.S, Pg/Z, [Xarray, #index * 4] + /// LD1ROW Zresult.S, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1ROW {.S }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1row, EA_SCALABLE, REG_V3, REG_P4, REG_R0, 224, INS_OPTS_SCALABLE_S); + /// IF_SVE_IP_4A LD1ROW {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1row, EA_SCALABLE, REG_V1, REG_P3, REG_R2, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, uint* address) => LoadVector256AndReplicateToVector(mask, address); + + /// + /// svuint64_t svld1ro[_u64](svbool_t pg, const uint64_t *base) + /// LD1ROD Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3] + /// LD1ROD Zresult.D, Pg/Z, [Xarray, #index * 8] + /// LD1ROD Zresult.D, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1ROD {.D }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rod, EA_SCALABLE, REG_V4, REG_P5, REG_R6, -32, INS_OPTS_SCALABLE_D); + /// IF_SVE_IP_4A LD1ROD {.D }, /Z, [, , LSL #3] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rod, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, ulong* address) => LoadVector256AndReplicateToVector(mask, address); + + /// + /// svfloat32_t svld1ro[_f32](svbool_t pg, const float32_t *base) + /// LD1ROW Zresult.S, Pg/Z, [Xarray, Xindex, LSL #2] + /// LD1ROW Zresult.S, Pg/Z, [Xarray, #index * 4] + /// LD1ROW Zresult.S, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1ROW {.S }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1row, EA_SCALABLE, REG_V3, REG_P4, REG_R0, 224, INS_OPTS_SCALABLE_S); + /// IF_SVE_IP_4A LD1ROW {.S }, /Z, [, , LSL #2] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1row, EA_SCALABLE, REG_V1, REG_P3, REG_R2, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, float* address) => LoadVector256AndReplicateToVector(mask, address); + + /// + /// svfloat64_t svld1ro[_f64](svbool_t pg, const float64_t *base) + /// 
+        ///   LD1ROD Zresult.D, Pg/Z, [Xarray, Xindex, LSL #3]
+        ///   LD1ROD Zresult.D, Pg/Z, [Xarray, #index * 8]
+        ///   LD1ROD Zresult.D, Pg/Z, [Xbase, #0]
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_IO_3A   LD1ROD {<Zt>.D }, <Pg>/Z, [<Xn|SP>{, #<imm>}]
+        ///    theEmitter->emitIns_R_R_R_I(INS_sve_ld1rod, EA_SCALABLE, REG_V4, REG_P5, REG_R6, -32, INS_OPTS_SCALABLE_D);
+        ///    IF_SVE_IP_4A   LD1ROD {<Zt>.D }, <Pg>/Z, [<Xn|SP>, <Xm>, LSL #3]
+        ///    theEmitter->emitIns_R_R_R_R(INS_sve_ld1rod, EA_SCALABLE, REG_V0, REG_P2, REG_R1, REG_R3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N);
+        /// </summary>
+        public static unsafe Vector<double> LoadVector256AndReplicateToVector(Vector<double> mask, double* address) => LoadVector256AndReplicateToVector(mask, address);
+
+
+        /// MatrixMultiplyAccumulate : Matrix multiply-accumulate
+
+        /// <summary>
+        /// svfloat64_t svmmla[_f64](svfloat64_t op1, svfloat64_t op2, svfloat64_t op3)
+        ///   FMMLA Ztied1.D, Zop2.D, Zop3.D
+        ///   MOVPRFX Zresult, Zop1; FMMLA Zresult.D, Zop2.D, Zop3.D
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_HD_3A_A   FMMLA <Zda>.D, <Zn>.D, <Zm>.D
+        ///    theEmitter->emitIns_R_R_R(INS_sve_fmmla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_D);
+        /// </summary>
+        public static unsafe Vector<double> MatrixMultiplyAccumulate(Vector<double> op1, Vector<double> op2, Vector<double> op3) => MatrixMultiplyAccumulate(op1, op2, op3);
+
+    }
+}
+
diff --git a/sve_api/out_helper_api/SveFp16.cs b/sve_api/out_helper_api/SveFp16.cs
new file mode 100644
index 0000000000000..4b1e581cea3b5
--- /dev/null
+++ b/sve_api/out_helper_api/SveFp16.cs
@@ -0,0 +1,2714 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics.CodeAnalysis;
+using System.Runtime.CompilerServices;
+using System.Runtime.Intrinsics;
+using System.Numerics;
+
+namespace System.Runtime.Intrinsics.Arm
+{
+    /// <summary>
+    /// This class provides access to the ARM SVE hardware instructions via intrinsics
+    /// </summary>
+    [Intrinsic]
+    [CLSCompliant(false)]
+    public abstract class SveFp16 : AdvSimd
+    {
+        internal SveFp16() { }
+
+        public static new bool IsSupported { get => IsSupported; }
+
+        [Intrinsic]
+        public new abstract class Arm64 : AdvSimd.Arm64
+        {
+            internal Arm64() { }
+
+            public static new bool IsSupported { get => IsSupported; }
+        }
+
+        /// Abs : Absolute value
+
+        /// <summary>
+        /// svfloat16_t svabs[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op)
+        ///   FABS Ztied.H, Pg/M, Zop.H
+        ///   MOVPRFX Zresult, Zinactive; FABS Zresult.H, Pg/M, Zop.H
+        /// svfloat16_t svabs[_f16]_x(svbool_t pg, svfloat16_t op)
+        ///   FABS Ztied.H, Pg/M, Ztied.H
+        ///   MOVPRFX Zresult, Zop; FABS Zresult.H, Pg/M, Zop.H
+        /// svfloat16_t svabs[_f16]_z(svbool_t pg, svfloat16_t op)
+        ///   MOVPRFX Zresult.H, Pg/Z, Zop.H; FABS Zresult.H, Pg/M, Zop.H
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_AP_3A   FABS <Zd>.<T>, <Pg>/M, <Zn>.<T>
+        ///    theEmitter->emitIns_R_R_R(INS_sve_fabs, EA_SCALABLE, REG_V27, REG_P4, REG_V4, INS_OPTS_SCALABLE_H);
+        ///
+        /// Embedded arg1 mask predicate
+        /// </summary>
+        public static unsafe Vector<half> Abs(Vector<half> value) => Abs(value);
+
+
+        /// AbsoluteCompareGreaterThan : Absolute compare greater than
+
+        /// <summary>
+        /// svbool_t svacgt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+        ///   FACGT Presult.H, Pg/Z, Zop1.H, Zop2.H
+        ///
+        /// codegenarm64test:
+        ///    IF_SVE_HT_4A   FACGT <Pd>.<T>, <Pg>/Z, <Zn>.<T>, <Zm>.<T>
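
// Usage sketch tying together the two FEAT_F64MM helpers that close the file above:
// LD1RO loads a 256-bit tile and repeats it across the scalable vector, and FMMLA
// multiply-accumulates 2x2 double tiles within each 256-bit segment. The class name
// Sve and the all-true mask helper are assumptions of this sketch, not shown here:
static unsafe Vector<double> MatMul2x2Tile(Vector<double> acc, double* aTile, double* bTile)
{
    Vector<double> mask = Sve.CreateTrueMaskDouble();               // assumed helper
    Vector<double> va = Sve.LoadVector256AndReplicateToVector(mask, aTile);
    Vector<double> vb = Sve.LoadVector256AndReplicateToVector(mask, bTile);
    return Sve.MatrixMultiplyAccumulate(acc, va, vb);               // acc += A x B
}
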
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_facgt, EA_SCALABLE, REG_P15, REG_P1, REG_V20, REG_V21, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteCompareGreaterThan(Vector left, Vector right) => AbsoluteCompareGreaterThan(left, right); + + + /// AbsoluteCompareGreaterThanOrEqual : Absolute compare greater than or equal to + + /// + /// svbool_t svacge[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FACGE Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FACGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_facge, EA_SCALABLE, REG_P0, REG_P0, REG_V10, REG_V31, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteCompareGreaterThanOrEqual(Vector left, Vector right) => AbsoluteCompareGreaterThanOrEqual(left, right); + + + /// AbsoluteCompareLessThan : Absolute compare less than + + /// + /// svbool_t svaclt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FACGT Presult.H, Pg/Z, Zop2.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FACGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_facgt, EA_SCALABLE, REG_P15, REG_P1, REG_V20, REG_V21, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteCompareLessThan(Vector left, Vector right) => AbsoluteCompareLessThan(left, right); + + + /// AbsoluteCompareLessThanOrEqual : Absolute compare less than or equal to + + /// + /// svbool_t svacle[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FACGE Presult.H, Pg/Z, Zop2.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FACGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_facge, EA_SCALABLE, REG_P0, REG_P0, REG_V10, REG_V31, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteCompareLessThanOrEqual(Vector left, Vector right) => AbsoluteCompareLessThanOrEqual(left, right); + + + /// AbsoluteDifference : Absolute difference + + /// + /// svfloat16_t svabd[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FABD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FABD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svabd[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FABD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// FABD Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; FABD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svabd[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FABD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; FABD Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FABD ., /M, ., . 
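
// As the mappings above show, the "less than" absolute compares have no instruction
// of their own: the operands are swapped and FACGT/FACGE emitted. Under that shape,
// each pair below is expected to set the same predicate lanes (illustrative sketch):
static void AbsoluteCompareEquivalences(Vector<half> a, Vector<half> b)
{
    var lt1 = SveFp16.AbsoluteCompareLessThan(a, b);
    var lt2 = SveFp16.AbsoluteCompareGreaterThan(b, a);          // same lanes as lt1
    var le1 = SveFp16.AbsoluteCompareLessThanOrEqual(a, b);
    var le2 = SveFp16.AbsoluteCompareGreaterThanOrEqual(b, a);   // same lanes as le1
}
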
+ /// theEmitter->emitIns_R_R_R(INS_sve_fabd, EA_SCALABLE, REG_V24, REG_P3, REG_V11, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AbsoluteDifference(Vector left, Vector right) => AbsoluteDifference(left, right); + + + /// Add : Add + + /// + /// svfloat16_t svadd[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svadd[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// FADD Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// FADD Zresult.H, Zop1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svadd[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FADD Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; FADD Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FADD ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fadd, EA_SCALABLE, REG_V25, REG_P2, REG_V10, INS_OPTS_SCALABLE_S); + /// IF_SVE_HK_3A FADD ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fadd, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_HM_2A FADD ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fadd, EA_SCALABLE, REG_V0, REG_P0, 0.5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_F(INS_sve_fadd, EA_SCALABLE, REG_V0, REG_P1, 1.0, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Add(Vector left, Vector right) => Add(left, right); + + + /// AddAcross : Add reduction + + /// + /// float16_t svaddv[_f16](svbool_t pg, svfloat16_t op) + /// FADDV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FADDV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_faddv, EA_2BYTE, REG_V21, REG_P7, REG_V7, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddAcross(Vector value) => AddAcross(value); + + + /// AddPairwise : Add pairwise + + /// + /// svfloat16_t svaddp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FADDP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FADDP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svaddp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FADDP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FADDP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GR_3A FADDP ., /M, ., . 
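
// The _m/_x/_z C variants listed above differ only in what inactive lanes receive:
// merging (_m) keeps op1, don't-care (_x) lets the compiler pick the cheapest form,
// zeroing (_z) clears them. Scalar model of the merging form, with float standing in
// for float16 for readability (sketch):
static float[] MergingAdd(bool[] pg, float[] op1, float[] op2)
{
    var result = new float[op1.Length];
    for (int i = 0; i < result.Length; i++)
        result[i] = pg[i] ? op1[i] + op2[i] : op1[i];   // inactive lanes keep op1
    return result;
}
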
+ /// theEmitter->emitIns_R_R_R(INS_sve_faddp, EA_SCALABLE, REG_V16, REG_P3, REG_V19, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddPairwise(Vector left, Vector right) => AddPairwise(left, right); + + + /// AddRotateComplex : Complex add with rotate + + /// + /// svfloat16_t svcadd[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, uint64_t imm_rotation) + /// FCADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H, #imm_rotation + /// MOVPRFX Zresult, Zop1; FCADD Zresult.H, Pg/M, Zresult.H, Zop2.H, #imm_rotation + /// svfloat16_t svcadd[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, uint64_t imm_rotation) + /// FCADD Ztied1.H, Pg/M, Ztied1.H, Zop2.H, #imm_rotation + /// MOVPRFX Zresult, Zop1; FCADD Zresult.H, Pg/M, Zresult.H, Zop2.H, #imm_rotation + /// svfloat16_t svcadd[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, uint64_t imm_rotation) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FCADD Zresult.H, Pg/M, Zresult.H, Zop2.H, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_GP_3A FCADD ., /M, ., ., + /// theEmitter->emitIns_R_R_R_I(INS_sve_fcadd, EA_SCALABLE, REG_V0, REG_P1, REG_V2, 90, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fcadd, EA_SCALABLE, REG_V0, REG_P1, REG_V2, 270, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fcadd, EA_SCALABLE, REG_V0, REG_P1, REG_V2, 270, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fcadd, EA_SCALABLE, REG_V0, REG_P1, REG_V2, 270, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation) => AddRotateComplex(left, right, rotation); + + + /// AddSequentialAcross : Add reduction (strictly-ordered) + + /// + /// float16_t svadda[_f16](svbool_t pg, float16_t initial, svfloat16_t op) + /// FADDA Htied, Pg, Htied, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HJ_3A FADDA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_fadda, EA_2BYTE, REG_V21, REG_P6, REG_V14, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_fadda, EA_4BYTE, REG_V22, REG_P5, REG_V13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_fadda, EA_8BYTE, REG_V23, REG_P4, REG_V12, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector AddSequentialAcross(Vector initial, Vector value) => AddSequentialAcross(initial, value); + + + /// CompareEqual : Compare equal to + + /// + /// svbool_t svcmpeq[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FCMEQ Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMEQ ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmeq, EA_SCALABLE, REG_P2, REG_P4, REG_V28, REG_V8, INS_OPTS_SCALABLE_S); + /// IF_SVE_HI_3A FCMEQ ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmeq, EA_SCALABLE, REG_P2, REG_P3, REG_V4, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareEqual(Vector left, Vector right) => CompareEqual(left, right); + + + /// CompareGreaterThan : Compare greater than + + /// + /// svbool_t svcmpgt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FCMGT Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMGT ., /Z, ., . 
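
// Scalar model of the FCADD rotations behind AddRotateComplex, over interleaved
// (re, im) element pairs and with float standing in for float16; #90 computes
// a + i*b and #270 computes a - i*b (the exact encoding of the byte rotation
// argument is not shown by this excerpt):
static void ComplexAddRotate90(float[] a, float[] b, float[] result)
{
    for (int i = 0; i < a.Length; i += 2)
    {
        result[i]     = a[i]     - b[i + 1];   // re: a.re - b.im
        result[i + 1] = a[i + 1] + b[i];       // im: a.im + b.re
    }
}
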
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmgt, EA_SCALABLE, REG_P3, REG_P6, REG_V18, REG_V28, INS_OPTS_SCALABLE_H); + /// IF_SVE_HI_3A FCMGT ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmgt, EA_SCALABLE, REG_P11, REG_P5, REG_V2, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThan(Vector left, Vector right) => CompareGreaterThan(left, right); + + + /// CompareGreaterThanOrEqual : Compare greater than or equal to + + /// + /// svbool_t svcmpge[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FCMGE Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmge, EA_SCALABLE, REG_P13, REG_P5, REG_V8, REG_V18, INS_OPTS_SCALABLE_D); + /// IF_SVE_HI_3A FCMGE ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmge, EA_SCALABLE, REG_P1, REG_P2, REG_V3, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right) => CompareGreaterThanOrEqual(left, right); + + + /// CompareLessThan : Compare less than + + /// + /// svbool_t svcmplt[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FCMGT Presult.H, Pg/Z, Zop2.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMGT ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmgt, EA_SCALABLE, REG_P3, REG_P6, REG_V18, REG_V28, INS_OPTS_SCALABLE_H); + /// IF_SVE_HI_3A FCMGT ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmgt, EA_SCALABLE, REG_P11, REG_P5, REG_V2, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThan(Vector left, Vector right) => CompareLessThan(left, right); + + + /// CompareLessThanOrEqual : Compare less than or equal to + + /// + /// svbool_t svcmple[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FCMGE Presult.H, Pg/Z, Zop2.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMGE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmge, EA_SCALABLE, REG_P13, REG_P5, REG_V8, REG_V18, INS_OPTS_SCALABLE_D); + /// IF_SVE_HI_3A FCMGE ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmge, EA_SCALABLE, REG_P1, REG_P2, REG_V3, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right) => CompareLessThanOrEqual(left, right); + + + /// CompareNotEqualTo : Compare not equal to + + /// + /// svbool_t svcmpne[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FCMNE Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmne, EA_SCALABLE, REG_P11, REG_P1, REG_V21, REG_V10, INS_OPTS_SCALABLE_H); + /// IF_SVE_HI_3A FCMNE ., /Z, ., #0.0 + /// theEmitter->emitIns_R_R_R(INS_sve_fcmne, EA_SCALABLE, REG_P1, REG_P0, REG_V5, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right) => CompareNotEqualTo(left, right); + + + /// CompareUnordered : Compare unordered with + + /// + /// svbool_t svcmpuo[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FCMUO Presult.H, Pg/Z, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_HT_4A FCMUO ., /Z, ., . 
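
// All of the compares above are IEEE 754 ordered except "not equal": a lane holding
// NaN in either operand compares false, while CompareNotEqualTo reports true, just
// like == and != on .NET floats. Scalar model of one of them (sketch):
static bool[] CompareGreaterThanModel(float[] a, float[] b)
{
    var result = new bool[a.Length];
    for (int i = 0; i < result.Length; i++)
        result[i] = a[i] > b[i];   // false whenever a[i] or b[i] is NaN
    return result;
}
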
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fcmuo, EA_SCALABLE, REG_P5, REG_P2, REG_V31, REG_V20, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector CompareUnordered(Vector left, Vector right) => CompareUnordered(left, right); + + + /// ConcatenateEvenInt128FromTwoInputs : Concatenate even quadwords from two inputs + + /// + /// svfloat16_t svuzp1q[_f16](svfloat16_t op1, svfloat16_t op2) + /// UZP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector ConcatenateEvenInt128FromTwoInputs(Vector left, Vector right) => ConcatenateEvenInt128FromTwoInputs(left, right); + + + /// ConcatenateOddInt128FromTwoInputs : Concatenate odd quadwords from two inputs + + /// + /// svfloat16_t svuzp2q[_f16](svfloat16_t op1, svfloat16_t op2) + /// UZP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConcatenateOddInt128FromTwoInputs(Vector left, Vector right) => ConcatenateOddInt128FromTwoInputs(left, right); + + + /// ConditionalExtractAfterLastActiveElement : Conditionally extract element after last + + /// + /// svfloat16_t svclasta[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) + /// CLASTA Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . 
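+
+ // Illustrative usage sketch, not generated output: assumes the elided Vector<half>
+ // generics; the helper name is hypothetical. FCMUO flags lanes where either operand is
+ // NaN, so comparing a vector with itself isolates the NaN lanes, which are then
+ // overwritten with a fill value.
+ private static Vector<half> ReplaceNaNSketch(Vector<half> values, Vector<half> fill)
+     => ConditionalSelect(CompareUnordered(values, values), fill, values);
+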
+ /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValue, data); + + /// + /// svfloat16_t svclasta[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) + /// CLASTA Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H + /// float16_t svclasta[_n_f16](svbool_t pg, float16_t fallback, svfloat16_t data) + /// CLASTA Wtied, Pg, Wtied, Zdata.H + /// CLASTA Htied, Pg, Htied, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe half ConditionalExtractAfterLastActiveElement(Vector mask, half defaultValues, Vector data) => ConditionalExtractAfterLastActiveElement(mask, defaultValues, data); + + + /// ConditionalExtractAfterLastActiveElementAndReplicate : Conditionally extract element after last + + /// + /// svfloat16_t svclasta[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) + /// CLASTA Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTA Zresult.H, Pg, Zresult.H, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTA ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_B); + /// IF_SVE_CN_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_2BYTE, REG_V12, REG_P1, REG_V15, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTA , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R0, REG_P0, REG_V0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_clasta, EA_4BYTE, REG_R1, REG_P2, REG_V3, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, Vector defaultScalar, Vector data) => ConditionalExtractAfterLastActiveElementAndReplicate(mask, defaultScalar, data); + + + /// ConditionalExtractLastActiveElement : Conditionally extract last element + + /// + /// svfloat16_t svclastb[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) + /// CLASTB Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTB ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D); + /// IF_SVE_CN_3A CLASTB , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTB , , , . 
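+
+ // Illustrative usage sketch, not generated output: assumes the elided Vector<half>
+ // generics; the helper name is hypothetical. When no lane of the mask is active, CLASTA
+ // leaves the fallback untouched, so this returns either the element after the last
+ // active one or the caller's default.
+ private static half AfterLastOrDefaultSketch(Vector<half> mask, Vector<half> data, half fallback)
+     => ConditionalExtractAfterLastActiveElement(mask, fallback, data);
+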
+ /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConditionalExtractLastActiveElement(Vector mask, Vector defaultValue, Vector data) => ConditionalExtractLastActiveElement(mask, defaultValue, data); + + /// + /// svfloat16_t svclastb[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) + /// CLASTB Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H + /// float16_t svclastb[_n_f16](svbool_t pg, float16_t fallback, svfloat16_t data) + /// CLASTB Wtied, Pg, Wtied, Zdata.H + /// CLASTB Htied, Pg, Htied, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTB ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D); + /// IF_SVE_CN_3A CLASTB , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTB , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D); + /// + public static unsafe half ConditionalExtractLastActiveElement(Vector mask, half defaultValues, Vector data) => ConditionalExtractLastActiveElement(mask, defaultValues, data); + + + /// ConditionalExtractLastActiveElementAndReplicate : Conditionally extract last element + + /// + /// svfloat16_t svclastb[_f16](svbool_t pg, svfloat16_t fallback, svfloat16_t data) + /// CLASTB Ztied.H, Pg, Ztied.H, Zdata.H + /// MOVPRFX Zresult, Zfallback; CLASTB Zresult.H, Pg, Zresult.H, Zdata.H + /// + /// codegenarm64test: + /// IF_SVE_CM_3A CLASTB ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_SCALABLE, REG_V30, REG_P6, REG_V30, INS_OPTS_SCALABLE_D); + /// IF_SVE_CN_3A CLASTB , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_V13, REG_P2, REG_V16, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_V14, REG_P0, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CO_3A CLASTB , , , . + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_4BYTE, REG_R23, REG_P5, REG_V12, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_clastb, EA_8BYTE, REG_R3, REG_P6, REG_V9, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ConditionalExtractLastActiveElementAndReplicate(Vector mask, Vector fallback, Vector data) => ConditionalExtractLastActiveElementAndReplicate(mask, fallback, data); + + + /// ConditionalSelect : Conditionally select elements + + /// + /// svfloat16_t svsel[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// SEL Zresult.H, Pg, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_CZ_4A CMPNE ., /Z, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_P4, REG_P6, REG_P13, REG_P10, INS_OPTS_SCALABLE_B); /* SEL .B, , .B, .B */ + /// IF_SVE_CW_4A SEL ., , ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V29, REG_P15, REG_V28, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R_R(INS_sve_sel, EA_SCALABLE, REG_V5, REG_P13, REG_V27, REG_V5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right) => ConditionalSelect(mask, left, right); + + + /// ConvertToDouble : Floating-point convert + + /// + /// svfloat64_t svcvt_f64[_f16]_m(svfloat64_t inactive, svbool_t pg, svfloat16_t op) + /// FCVT Ztied.D, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FCVT Zresult.D, Pg/M, Zop.H + /// svfloat64_t svcvt_f64[_f16]_x(svbool_t pg, svfloat16_t op) + /// FCVT Ztied.D, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FCVT Zresult.D, Pg/M, Zop.H + /// svfloat64_t svcvt_f64[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVT Zresult.D, Pg/M, Zop.H + /// + /// codegenarm64test: + /// sve_fcvt - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToDouble(Vector value) => ConvertToDouble(value); + + + /// ConvertToHalf : Floating-point convert + + /// + /// svfloat16_t svcvt_f16[_s16]_m(svfloat16_t inactive, svbool_t pg, svint16_t op) + /// SCVTF Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; SCVTF Zresult.H, Pg/M, Zop.H + /// svfloat16_t svcvt_f16[_s16]_x(svbool_t pg, svint16_t op) + /// SCVTF Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; SCVTF Zresult.H, Pg/M, Zop.H + /// svfloat16_t svcvt_f16[_s16]_z(svbool_t pg, svint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; SCVTF Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// sve_scvtf - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + /// + /// svfloat16_t svcvt_f16[_s32]_m(svfloat16_t inactive, svbool_t pg, svint32_t op) + /// SCVTF Ztied.H, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; SCVTF Zresult.H, Pg/M, Zop.S + /// svfloat16_t svcvt_f16[_s32]_x(svbool_t pg, svint32_t op) + /// SCVTF Ztied.H, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; SCVTF Zresult.H, Pg/M, Zop.S + /// svfloat16_t svcvt_f16[_s32]_z(svbool_t pg, svint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; SCVTF Zresult.H, Pg/M, Zop.S + /// + /// codegenarm64test: + /// sve_scvtf - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + /// + /// svfloat16_t svcvt_f16[_s64]_m(svfloat16_t inactive, svbool_t pg, svint64_t op) + /// SCVTF Ztied.H, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; SCVTF Zresult.H, Pg/M, Zop.D + /// svfloat16_t svcvt_f16[_s64]_x(svbool_t pg, svint64_t op) + /// SCVTF Ztied.H, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; SCVTF Zresult.H, Pg/M, Zop.D + /// svfloat16_t svcvt_f16[_s64]_z(svbool_t pg, svint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; SCVTF Zresult.H, Pg/M, Zop.D + /// + /// codegenarm64test: + /// sve_scvtf - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + /// + /// svfloat16_t svcvt_f16[_u16]_m(svfloat16_t inactive, svbool_t pg, svuint16_t op) + /// UCVTF Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; UCVTF Zresult.H, Pg/M, Zop.H + /// svfloat16_t svcvt_f16[_u16]_x(svbool_t pg, svuint16_t op) + /// UCVTF Ztied.H, Pg/M, Ztied.H + /// MOVPRFX 
Zresult, Zop; UCVTF Zresult.H, Pg/M, Zop.H + /// svfloat16_t svcvt_f16[_u16]_z(svbool_t pg, svuint16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; UCVTF Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// sve_ucvtf - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + /// + /// svfloat16_t svcvt_f16[_u32]_m(svfloat16_t inactive, svbool_t pg, svuint32_t op) + /// UCVTF Ztied.H, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; UCVTF Zresult.H, Pg/M, Zop.S + /// svfloat16_t svcvt_f16[_u32]_x(svbool_t pg, svuint32_t op) + /// UCVTF Ztied.H, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; UCVTF Zresult.H, Pg/M, Zop.S + /// svfloat16_t svcvt_f16[_u32]_z(svbool_t pg, svuint32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; UCVTF Zresult.H, Pg/M, Zop.S + /// + /// codegenarm64test: + /// sve_ucvtf - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + /// + /// svfloat16_t svcvt_f16[_u64]_m(svfloat16_t inactive, svbool_t pg, svuint64_t op) + /// UCVTF Ztied.H, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; UCVTF Zresult.H, Pg/M, Zop.D + /// svfloat16_t svcvt_f16[_u64]_x(svbool_t pg, svuint64_t op) + /// UCVTF Ztied.H, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; UCVTF Zresult.H, Pg/M, Zop.D + /// svfloat16_t svcvt_f16[_u64]_z(svbool_t pg, svuint64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; UCVTF Zresult.H, Pg/M, Zop.D + /// + /// codegenarm64test: + /// sve_ucvtf - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + /// + /// svfloat16_t svcvt_f16[_f32]_m(svfloat16_t inactive, svbool_t pg, svfloat32_t op) + /// FCVT Ztied.H, Pg/M, Zop.S + /// MOVPRFX Zresult, Zinactive; FCVT Zresult.H, Pg/M, Zop.S + /// svfloat16_t svcvt_f16[_f32]_x(svbool_t pg, svfloat32_t op) + /// FCVT Ztied.H, Pg/M, Ztied.S + /// MOVPRFX Zresult, Zop; FCVT Zresult.H, Pg/M, Zop.S + /// svfloat16_t svcvt_f16[_f32]_z(svbool_t pg, svfloat32_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FCVT Zresult.H, Pg/M, Zop.S + /// + /// codegenarm64test: + /// sve_fcvt - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + /// + /// svfloat16_t svcvt_f16[_f64]_m(svfloat16_t inactive, svbool_t pg, svfloat64_t op) + /// FCVT Ztied.H, Pg/M, Zop.D + /// MOVPRFX Zresult, Zinactive; FCVT Zresult.H, Pg/M, Zop.D + /// svfloat16_t svcvt_f16[_f64]_x(svbool_t pg, svfloat64_t op) + /// FCVT Ztied.H, Pg/M, Ztied.D + /// MOVPRFX Zresult, Zop; FCVT Zresult.H, Pg/M, Zop.D + /// svfloat16_t svcvt_f16[_f64]_z(svbool_t pg, svfloat64_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVT Zresult.H, Pg/M, Zop.D + /// + /// codegenarm64test: + /// sve_fcvt - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToHalf(Vector value) => ConvertToHalf(value); + + + /// ConvertToInt16 : Floating-point convert + + /// + /// svint16_t svcvt_s16[_f16]_m(svint16_t inactive, svbool_t pg, svfloat16_t op) + /// FCVTZS Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FCVTZS Zresult.H, Pg/M, Zop.H + /// svint16_t svcvt_s16[_f16]_x(svbool_t pg, svfloat16_t op) + /// FCVTZS Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FCVTZS Zresult.H, Pg/M, Zop.H + /// svint16_t svcvt_s16[_f16]_z(svbool_t pg, 
svfloat16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; FCVTZS Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// sve_fcvtzs - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToInt16(Vector value) => ConvertToInt16(value); + + + /// ConvertToInt32 : Floating-point convert + + /// + /// svint32_t svcvt_s32[_f16]_m(svint32_t inactive, svbool_t pg, svfloat16_t op) + /// FCVTZS Ztied.S, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FCVTZS Zresult.S, Pg/M, Zop.H + /// svint32_t svcvt_s32[_f16]_x(svbool_t pg, svfloat16_t op) + /// FCVTZS Ztied.S, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FCVTZS Zresult.S, Pg/M, Zop.H + /// svint32_t svcvt_s32[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FCVTZS Zresult.S, Pg/M, Zop.H + /// + /// codegenarm64test: + /// sve_fcvtzs - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToInt32(Vector value) => ConvertToInt32(value); + + + /// ConvertToInt64 : Floating-point convert + + /// + /// svint64_t svcvt_s64[_f16]_m(svint64_t inactive, svbool_t pg, svfloat16_t op) + /// FCVTZS Ztied.D, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FCVTZS Zresult.D, Pg/M, Zop.H + /// svint64_t svcvt_s64[_f16]_x(svbool_t pg, svfloat16_t op) + /// FCVTZS Ztied.D, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FCVTZS Zresult.D, Pg/M, Zop.H + /// svint64_t svcvt_s64[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZS Zresult.D, Pg/M, Zop.H + /// + /// codegenarm64test: + /// sve_fcvtzs - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToInt64(Vector value) => ConvertToInt64(value); + + + /// ConvertToSingle : Floating-point convert + + /// + /// svfloat32_t svcvt_f32[_f16]_m(svfloat32_t inactive, svbool_t pg, svfloat16_t op) + /// FCVT Ztied.S, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FCVT Zresult.S, Pg/M, Zop.H + /// svfloat32_t svcvt_f32[_f16]_x(svbool_t pg, svfloat16_t op) + /// FCVT Ztied.S, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FCVT Zresult.S, Pg/M, Zop.H + /// svfloat32_t svcvt_f32[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FCVT Zresult.S, Pg/M, Zop.H + /// + /// codegenarm64test: + /// sve_fcvt - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToSingle(Vector value) => ConvertToSingle(value); + + + /// ConvertToUInt16 : Floating-point convert + + /// + /// svuint16_t svcvt_u16[_f16]_m(svuint16_t inactive, svbool_t pg, svfloat16_t op) + /// FCVTZU Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FCVTZU Zresult.H, Pg/M, Zop.H + /// svuint16_t svcvt_u16[_f16]_x(svbool_t pg, svfloat16_t op) + /// FCVTZU Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FCVTZU Zresult.H, Pg/M, Zop.H + /// svuint16_t svcvt_u16[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; FCVTZU Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// sve_fcvtzu - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToUInt16(Vector value) => ConvertToUInt16(value); + + + /// ConvertToUInt32 : Floating-point convert + + /// + /// svuint32_t svcvt_u32[_f16]_m(svuint32_t inactive, svbool_t pg, svfloat16_t op) + /// FCVTZU Ztied.S, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FCVTZU Zresult.S, Pg/M, Zop.H + /// svuint32_t svcvt_u32[_f16]_x(svbool_t pg, svfloat16_t op) + /// 
FCVTZU Ztied.S, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FCVTZU Zresult.S, Pg/M, Zop.H + /// svuint32_t svcvt_u32[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.S, Pg/Z, Zop.S; FCVTZU Zresult.S, Pg/M, Zop.H + /// + /// codegenarm64test: + /// sve_fcvtzu - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToUInt32(Vector value) => ConvertToUInt32(value); + + + /// ConvertToUInt64 : Floating-point convert + + /// + /// svuint64_t svcvt_u64[_f16]_m(svuint64_t inactive, svbool_t pg, svfloat16_t op) + /// FCVTZU Ztied.D, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FCVTZU Zresult.D, Pg/M, Zop.H + /// svuint64_t svcvt_u64[_f16]_x(svbool_t pg, svfloat16_t op) + /// FCVTZU Ztied.D, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FCVTZU Zresult.D, Pg/M, Zop.H + /// svuint64_t svcvt_u64[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.D, Pg/Z, Zop.D; FCVTZU Zresult.D, Pg/M, Zop.H + /// + /// codegenarm64test: + /// sve_fcvtzu - not implemented in coreclr + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ConvertToUInt64(Vector value) => ConvertToUInt64(value); + + + + /// CreateFalseMaskHalf : Set all predicate elements to false + + /// + /// svbool_t svpfalse[_b]() + /// PFALSE Presult.B + /// + /// codegenarm64test: + /// IF_SVE_DJ_1A PFALSE .B + /// theEmitter->emitIns_R(INS_sve_pfalse, EA_SCALABLE, REG_P13); + /// + public static unsafe Vector CreateFalseMaskHalf() => CreateFalseMaskHalf(); + + + /// CreateTrueMaskHalf : Set predicate elements to true + + /// + /// svbool_t svptrue_pat_b8(enum svpattern pattern) + /// PTRUE Presult.B, pattern + /// + /// codegenarm64test: + /// IF_SVE_DE_1A PTRUE .{, } + /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P0, INS_OPTS_SCALABLE_B, SVE_PATTERN_POW2); + /// theEmitter->emitIns_R_PATTERN(INS_sve_ptrue, EA_SCALABLE, REG_P7, INS_OPTS_SCALABLE_H, SVE_PATTERN_MUL3); + /// IF_SVE_DZ_1A PTRUE . 
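+
+ // Illustrative usage sketch, not generated output: assumes the elided Vector<half> and
+ // Vector<int> generics; the helper name is hypothetical. FCVTZS/SCVTF round toward zero,
+ // so a round trip through Int32 drops the fractional part of each representable lane.
+ private static Vector<half> TruncateTowardZeroSketch(Vector<half> values)
+     => ConvertToHalf(ConvertToInt32(values));
+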
+ /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P8, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P9, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P10, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R(INS_sve_ptrue, EA_SCALABLE, REG_P11, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateTrueMaskHalf([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All) => CreateTrueMaskHalf(pattern); + + + /// CreateWhileReadAfterWriteMask : While free of read-after-write conflicts + + /// + /// svbool_t svwhilerw[_f16](const float16_t *op1, const float16_t *op2) + /// WHILERW Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILERW ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P0, REG_R0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P2, REG_R4, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilerw, EA_8BYTE, REG_P3, REG_R6, REG_R7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileReadAfterWriteMask(half* left, half* right) => CreateWhileReadAfterWriteMask(left, right); + + + /// CreateWhileWriteAfterReadMask : While free of write-after-read conflicts + + /// + /// svbool_t svwhilewr[_f16](const float16_t *op1, const float16_t *op2) + /// WHILEWR Presult.H, Xop1, Xop2 + /// + /// codegenarm64test: + /// IF_SVE_DU_3A WHILEWR ., , + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P4, REG_R8, REG_R9, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P5, REG_R10, REG_R11, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P6, REG_R12, REG_R13, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_whilewr, EA_8BYTE, REG_P7, REG_R14, REG_R15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector CreateWhileWriteAfterReadMask(half* left, half* right) => CreateWhileWriteAfterReadMask(left, right); + + + /// Divide : Divide + + /// + /// svfloat16_t svdiv[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FDIV Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FDIV Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svdiv[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FDIV Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// FDIVR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; FDIV Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svdiv[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FDIV Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; FDIVR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FDIV ., /M, ., . 
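+
+ // Illustrative usage sketch, not generated output: assumes the elided Vector<half>
+ // generics; the helper name is hypothetical. WHILERW yields an all-true mask only when
+ // reading src and then writing dst cannot conflict within one vector of elements, which
+ // makes it usable as a cheap vectorisation guard for possibly-overlapping buffers.
+ private static unsafe bool NoReadAfterWriteHazardSketch(half* src, half* dst)
+ {
+     Vector<half> safe = CreateWhileReadAfterWriteMask(src, dst);
+     return GetActiveElementCount(safe, safe) == (ulong)Vector<half>.Count;
+ }
+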
+ /// theEmitter->emitIns_R_R_R(INS_sve_fdiv, EA_SCALABLE, REG_V28, REG_P0, REG_V7, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Divide(Vector left, Vector right) => Divide(left, right); + + + + /// DownConvertNarrowingUpper : Down convert and narrow (top) + + /// + /// svfloat16_t svcvtnt_f16[_f32]_m(svfloat16_t even, svbool_t pg, svfloat32_t op) + /// FCVTNT Ztied.H, Pg/M, Zop.S + /// svfloat16_t svcvtnt_f16[_f32]_x(svfloat16_t even, svbool_t pg, svfloat32_t op) + /// FCVTNT Ztied.H, Pg/M, Zop.S + /// + /// codegenarm64test: + /// IF_SVE_GQ_3A FCVTNT .H, /M, .S + /// theEmitter->emitIns_R_R_R(INS_sve_fcvtnt, EA_SCALABLE, REG_V18, REG_P3, REG_V9, INS_OPTS_S_TO_H); + /// theEmitter->emitIns_R_R_R(INS_sve_fcvtnt, EA_SCALABLE, REG_V12, REG_P3, REG_V5, INS_OPTS_D_TO_S); + /// IF_SVE_HG_2A FCVTNT .B, {.S-.S } + /// theEmitter->emitIns_R_R(INS_sve_fcvtnt, EA_SCALABLE, REG_V14, REG_V15); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector DownConvertNarrowingUpper(Vector value) => DownConvertNarrowingUpper(value); + + + /// DuplicateSelectedScalarToVector : Broadcast a scalar value + + /// + /// svfloat16_t svdup_lane[_f16](svfloat16_t data, uint16_t index) + /// DUP Zresult.H, Zdata.H[index] + /// TBL Zresult.H, Zdata.H, Zindex.H + /// svfloat16_t svdupq_lane[_f16](svfloat16_t data, uint64_t index) + /// DUP Zresult.Q, Zdata.Q[index] + /// TBL Zresult.D, Zdata.D, Zindices_d.D + /// + /// codegenarm64test: + /// IF_SVE_EB_1A DUP ., #{, } + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V0, -128, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V1, 0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_SHIFT); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_I(INS_sve_dup, EA_SCALABLE, REG_V3, 127, INS_OPTS_SCALABLE_D); + /// IF_SVE_CB_2A DUP ., + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V0, REG_R1, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V2, REG_R3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_4BYTE, REG_V1, REG_R5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_dup, EA_8BYTE, REG_V4, REG_SP, INS_OPTS_SCALABLE_D); + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . 
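+
+ // Illustrative usage sketch, not generated output: assumes the elided Vector<float> and
+ // Vector<half> generics; the helper name is hypothetical. FCVTNT narrows each float lane
+ // into the top (odd-numbered) half-width lanes; the stub exposes just the converted
+ // vector.
+ private static Vector<half> NarrowToUpperSketch(Vector<float> wide)
+     => DownConvertNarrowingUpper(wide);
+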
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(data, index); + + + /// ExtractAfterLastScalar : Extract element after last + + /// + /// float16_t svlasta[_f16](svbool_t pg, svfloat16_t op) + /// LASTA Wresult, Pg, Zop.H + /// LASTA Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe half ExtractAfterLastScalar(Vector value) => ExtractAfterLastScalar(value); + + + /// ExtractAfterLastVector : Extract element after last + + /// + /// float16_t svlasta[_f16](svbool_t pg, svfloat16_t op) + /// LASTA Wresult, Pg, Zop.H + /// LASTA Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_1BYTE, REG_V6, REG_P1, REG_V27, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_2BYTE, REG_V5, REG_P2, REG_V26, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTA , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R1, REG_P5, REG_V23, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_lasta, EA_4BYTE, REG_R0, REG_P6, REG_V22, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractAfterLastVector(Vector value) => ExtractAfterLastVector(value); + + + /// ExtractLastScalar : Extract last element + + /// + /// float16_t svlastb[_f16](svbool_t pg, svfloat16_t op) + /// LASTB Wresult, Pg, Zop.H + /// LASTB Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . 
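+
+ // Illustrative usage sketch, not generated output: assumes the elided Vector<half>
+ // generics; the helper name is hypothetical. DUP-style lane broadcast: every element is
+ // divided by lane 0 after splatting that lane across the whole vector.
+ private static Vector<half> DivideByLaneZeroSketch(Vector<half> values)
+     => Divide(values, DuplicateSelectedScalarToVector(values, 0));
+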
+ /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe half ExtractLastScalar(Vector value) => ExtractLastScalar(value); + + + /// ExtractLastVector : Extract last element + + /// + /// float16_t svlastb[_f16](svbool_t pg, svfloat16_t op) + /// LASTB Wresult, Pg, Zop.H + /// LASTB Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CR_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_V4, REG_P3, REG_V25, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_V3, REG_P4, REG_V24, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_SIMD_SCALAR); + /// IF_SVE_CS_3A LASTB , , . + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_4BYTE, REG_R30, REG_P7, REG_V21, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_lastb, EA_8BYTE, REG_R29, REG_P0, REG_V20, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ExtractLastVector(Vector value) => ExtractLastVector(value); + + + /// ExtractVector : Extract vector from pair of vectors + + /// + /// svfloat16_t svext[_f16](svfloat16_t op1, svfloat16_t op2, uint64_t imm3) + /// EXT Ztied1.B, Ztied1.B, Zop2.B, #imm3 * 2 + /// MOVPRFX Zresult, Zop1; EXT Zresult.B, Zresult.B, Zop2.B, #imm3 * 2 + /// + /// codegenarm64test: + /// sve_ext - not implemented in coreclr + /// + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index) => ExtractVector(upper, lower, index); + + + /// FloatingPointExponentialAccelerator : Floating-point exponential accelerator + + /// + /// svfloat16_t svexpa[_f16](svuint16_t op) + /// FEXPA Zresult.H, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_BJ_2A FEXPA ., . + /// theEmitter->emitIns_R_R(INS_sve_fexpa, EA_SCALABLE, REG_V0, REG_V1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_fexpa, EA_SCALABLE, REG_V3, REG_V0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_fexpa, EA_SCALABLE, REG_V1, REG_V0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector FloatingPointExponentialAccelerator(Vector value) => FloatingPointExponentialAccelerator(value); + + + /// FusedMultiplyAdd : Multiply-add, addend first + + /// + /// svfloat16_t svmla[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// FMLA Ztied1.H, Pg/M, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; FMLA Zresult.H, Pg/M, Zop2.H, Zop3.H + /// svfloat16_t svmla[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// FMLA Ztied1.H, Pg/M, Zop2.H, Zop3.H + /// FMAD Ztied2.H, Pg/M, Zop3.H, Zop1.H + /// FMAD Ztied3.H, Pg/M, Zop2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; FMLA Zresult.H, Pg/M, Zop2.H, Zop3.H + /// svfloat16_t svmla[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMLA Zresult.H, Pg/M, Zop2.H, Zop3.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMAD Zresult.H, Pg/M, Zop3.H, Zop1.H + /// MOVPRFX Zresult.H, Pg/Z, Zop3.H; FMAD Zresult.H, Pg/M, Zop2.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FMLA ., /M, ., . 
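+
+ // Illustrative usage sketch, not generated output: assumes the elided Vector<half>
+ // generics; the helper name is hypothetical. EXT forms a sliding window over a pair of
+ // adjacent vectors; an index of 1 advances the window by one half-precision lane.
+ private static Vector<half> SlideWindowByOneSketch(Vector<half> upper, Vector<half> lower)
+     => ExtractVector(upper, lower, 1);
+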
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fmla, EA_SCALABLE, REG_V0, REG_P0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_GU_3A FMLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3B FMLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V1, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V3, REG_V2, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right) => FusedMultiplyAdd(addend, left, right); + + + /// FusedMultiplyAddBySelectedScalar : Multiply-add, addend first + + /// + /// svfloat16_t svmla_lane[_f16](svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) + /// FMLA Ztied1.H, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; FMLA Zresult.H, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FMLA ., /M, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fmla, EA_SCALABLE, REG_V0, REG_P0, REG_V1, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_GU_3A FMLA .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3B FMLA .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V1, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmla, EA_SCALABLE, REG_V3, REG_V2, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex); + + + /// FusedMultiplyAddNegated : Negated multiply-add, addend first + + /// + /// svfloat16_t svnmla[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// FNMLA Ztied1.H, Pg/M, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; FNMLA Zresult.H, Pg/M, Zop2.H, Zop3.H + /// svfloat16_t svnmla[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// FNMLA Ztied1.H, Pg/M, Zop2.H, Zop3.H + /// FNMAD Ztied2.H, Pg/M, Zop3.H, Zop1.H + /// FNMAD Ztied3.H, Pg/M, Zop2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; FNMLA Zresult.H, Pg/M, Zop2.H, Zop3.H + /// svfloat16_t svnmla[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FNMLA Zresult.H, Pg/M, Zop2.H, Zop3.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; FNMAD Zresult.H, Pg/M, Zop3.H, Zop1.H + /// MOVPRFX Zresult.H, Pg/Z, Zop3.H; FNMAD Zresult.H, Pg/M, Zop2.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FNMLA ., /M, ., . 
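+
+ // Illustrative usage sketch, not generated output: assumes the elided Vector<half>
+ // generics; the helper names are hypothetical. FMLA takes the addend first, so
+ // acc + a * b is one fused step; the indexed form reuses a single lane of b for every
+ // multiply, as in a small matrix-times-vector kernel.
+ private static Vector<half> MulAddSketch(Vector<half> acc, Vector<half> a, Vector<half> b)
+     => FusedMultiplyAdd(acc, a, b);
+ private static Vector<half> MulAddByLaneSketch(Vector<half> acc, Vector<half> a, Vector<half> b)
+     => FusedMultiplyAddBySelectedScalar(acc, a, b, 0);
+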
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fnmla, EA_SCALABLE, REG_V6, REG_P4, REG_V7, REG_V8, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right) => FusedMultiplyAddNegated(addend, left, right); + + + /// FusedMultiplySubtract : Multiply-subtract, minuend first + + /// + /// svfloat16_t svmls[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// FMLS Ztied1.H, Pg/M, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; FMLS Zresult.H, Pg/M, Zop2.H, Zop3.H + /// svfloat16_t svmls[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// FMLS Ztied1.H, Pg/M, Zop2.H, Zop3.H + /// FMSB Ztied2.H, Pg/M, Zop3.H, Zop1.H + /// FMSB Ztied3.H, Pg/M, Zop2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; FMLS Zresult.H, Pg/M, Zop2.H, Zop3.H + /// svfloat16_t svmls[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMLS Zresult.H, Pg/M, Zop2.H, Zop3.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMSB Zresult.H, Pg/M, Zop3.H, Zop1.H + /// MOVPRFX Zresult.H, Pg/Z, Zop3.H; FMSB Zresult.H, Pg/M, Zop2.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FMLS ., /M, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fmls, EA_SCALABLE, REG_V3, REG_P2, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3A FMLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3B FMLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V5, REG_V4, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V7, REG_V6, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right) => FusedMultiplySubtract(minuend, left, right); + + + /// FusedMultiplySubtractBySelectedScalar : Multiply-subtract, minuend first + + /// + /// svfloat16_t svmls_lane[_f16](svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) + /// FMLS Ztied1.H, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; FMLS Zresult.H, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FMLS ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_fmls, EA_SCALABLE, REG_V3, REG_P2, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3A FMLS .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_GU_3B FMLS .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V5, REG_V4, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmls, EA_SCALABLE, REG_V7, REG_V6, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); + + + /// FusedMultiplySubtractNegated : Negated multiply-subtract, minuend first + + /// + /// svfloat16_t svnmls[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// FNMLS Ztied1.H, Pg/M, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; FNMLS Zresult.H, Pg/M, Zop2.H, Zop3.H + /// svfloat16_t svnmls[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// FNMLS Ztied1.H, Pg/M, Zop2.H, Zop3.H + /// FNMSB Ztied2.H, Pg/M, Zop3.H, Zop1.H + /// FNMSB Ztied3.H, Pg/M, Zop2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; FNMLS Zresult.H, Pg/M, Zop2.H, Zop3.H + /// svfloat16_t svnmls[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FNMLS Zresult.H, Pg/M, Zop2.H, Zop3.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; FNMSB Zresult.H, Pg/M, Zop3.H, Zop1.H + /// MOVPRFX Zresult.H, Pg/Z, Zop3.H; FNMSB Zresult.H, Pg/M, Zop2.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HU_4A FNMLS ., /M, ., . + /// theEmitter->emitIns_R_R_R_R(INS_sve_fnmls, EA_SCALABLE, REG_V9, REG_P6, REG_V10, REG_V11, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right) => FusedMultiplySubtractNegated(minuend, left, right); + + + /// GetActiveElementCount : Count set predicate bits + + /// + /// uint64_t svcntp_b8(svbool_t pg, svbool_t op) + /// CNTP Xresult, Pg, Pop.B + /// + /// codegenarm64test: + /// IF_SVE_DK_3A CNTP , , . 
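+
+ // Illustrative usage sketch, not generated output: assumes the elided Vector<half>
+ // generics; the helper name is hypothetical. CNTP counts the active lanes of a
+ // predicate, here the lanes on which two vectors compare equal.
+ private static ulong CountEqualLanesSketch(Vector<half> a, Vector<half> b)
+ {
+     Vector<half> eq = CompareEqual(a, b);
+     return GetActiveElementCount(eq, eq);
+ }
+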
+ /// theEmitter->emitIns_R_R_R(INS_sve_cntp, EA_8BYTE, REG_R29, REG_P0, REG_P15, INS_OPTS_SCALABLE_D); + /// IF_SVE_DL_2A CNTP , ., + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R0, REG_P0, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R1, REG_P1, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R2, REG_P2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R3, REG_P3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R4, REG_P4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R5, REG_P5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_VL_4X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R6, REG_P6, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_2X); + /// theEmitter->emitIns_R_R(INS_sve_cntp, EA_8BYTE, REG_R7, REG_P7, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_VL_4X); + /// + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from) => GetActiveElementCount(mask, from); + + + /// InsertIntoShiftedVector : Insert scalar into shifted vector + + /// + /// svfloat16_t svinsr[_n_f16](svfloat16_t op1, float16_t op2) + /// INSR Ztied1.H, Wop2 + /// INSR Ztied1.H, Hop2 + /// + /// codegenarm64test: + /// sve_insr - not implemented in coreclr + /// + public static unsafe Vector InsertIntoShiftedVector(Vector left, half right) => InsertIntoShiftedVector(left, right); + + + /// InterleaveEvenInt128FromTwoInputs : Interleave even quadwords from two inputs + + /// + /// svfloat16_t svtrn1q[_f16](svfloat16_t op1, svfloat16_t op2) + /// TRN1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveEvenInt128FromTwoInputs(Vector left, Vector right) => InterleaveEvenInt128FromTwoInputs(left, right); + + + /// InterleaveInt128FromHighHalvesOfTwoInputs : Interleave quadwords from high halves of two inputs + + /// + /// svfloat16_t svzip2q[_f16](svfloat16_t op1, svfloat16_t op2) + /// ZIP2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP2 ., ., . 
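+
+ // Illustrative usage sketch, not generated output: assumes the elided Vector<half>
+ // generics; the helper name is hypothetical. INSR shifts every lane up by one position
+ // and inserts the scalar at lane 0, so the vector behaves like a delay line for
+ // streaming samples.
+ private static Vector<half> PushSampleSketch(Vector<half> window, half newest)
+     => InsertIntoShiftedVector(window, newest);
+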
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveInt128FromHighHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromHighHalvesOfTwoInputs(left, right); + + + /// InterleaveInt128FromLowHalvesOfTwoInputs : Interleave quadwords from low halves of two inputs + + /// + /// svfloat16_t svzip1q[_f16](svfloat16_t op1, svfloat16_t op2) + /// ZIP1 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A ZIP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector InterleaveInt128FromLowHalvesOfTwoInputs(Vector left, Vector right) => InterleaveInt128FromLowHalvesOfTwoInputs(left, right); + + + /// InterleaveOddInt128FromTwoInputs : Interleave odd quadwords from two inputs + + /// + /// svfloat16_t svtrn2q[_f16](svfloat16_t op1, svfloat16_t op2) + /// TRN2 Zresult.Q, Zop1.Q, Zop2.Q + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . 
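+
+ // Illustrative usage sketch, not generated output: assumes the elided Vector<half>
+ // generics; the helper name is hypothetical. ZIP1/ZIP2 at .Q granularity interleave
+ // whole 128-bit blocks, first from the low halves of the two inputs and then from the
+ // high halves.
+ private static (Vector<half> Low, Vector<half> High) InterleaveQuadwordsSketch(Vector<half> a, Vector<half> b)
+     => (InterleaveInt128FromLowHalvesOfTwoInputs(a, b),
+         InterleaveInt128FromHighHalvesOfTwoInputs(a, b));
+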
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector InterleaveOddInt128FromTwoInputs(Vector left, Vector right) => InterleaveOddInt128FromTwoInputs(left, right); + + + /// LoadVector : Unextended load + + /// + /// svfloat16_t svld1[_f16](svbool_t pg, const float16_t *base) + /// LD1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IJ_3A_G LD1H {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R6, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HW_4A LD1H {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P3, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LD1H {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V2, REG_P1, REG_R0, REG_V1, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LD1H {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P5, REG_R4, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LD1H {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LD1H {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V5, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LD1H {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_IK_4A_I LD1H {.H }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1h, EA_SCALABLE, REG_V4, REG_P2, REG_R3, REG_R1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LD1H {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1h, EA_SCALABLE, REG_V1, REG_P0, REG_V2, 0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVector(Vector mask, half* address) => LoadVector(mask, address); + + + /// LoadVector128AndReplicateToVector : Load and replicate 128 bits of data + + /// + /// svfloat16_t svld1rq[_f16](svbool_t pg, const float16_t *base) + /// LD1RQH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD1RQH Zresult.H, 
Pg/Z, [Xarray, #index * 2] + /// LD1RQH Zresult.H, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1RQH {.H }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1rqh, EA_SCALABLE, REG_V4, REG_P5, REG_R6, 112, INS_OPTS_SCALABLE_H); + /// IF_SVE_IP_4A LD1RQH {.H }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1rqh, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, half* address) => LoadVector128AndReplicateToVector(mask, address); + + + /// LoadVector256AndReplicateToVector : Load and replicate 256 bits of data + + /// + /// svfloat16_t svld1ro[_f16](svbool_t pg, const float16_t *base) + /// LD1ROH Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD1ROH Zresult.H, Pg/Z, [Xarray, #index * 2] + /// LD1ROH Zresult.H, Pg/Z, [Xbase, #0] + /// + /// codegenarm64test: + /// IF_SVE_IO_3A LD1ROH {.H }, /Z, [{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld1roh, EA_SCALABLE, REG_V8, REG_P3, REG_R1, -256, INS_OPTS_SCALABLE_H); + /// IF_SVE_IP_4A LD1ROH {.H }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld1roh, EA_SCALABLE, REG_V4, REG_P3, REG_R2, REG_R1, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVector256AndReplicateToVector(Vector mask, half* address) => LoadVector256AndReplicateToVector(mask, address); + + + /// LoadVectorFirstFaulting : Unextended load, first-faulting + + /// + /// svfloat16_t svldff1[_f16](svbool_t pg, const float16_t *base) + /// LDFF1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LDFF1H Zresult.H, Pg/Z, [Xbase, XZR, LSL #1] + /// + /// codegenarm64test: + /// IF_SVE_HW_4A LDFF1H {.S }, /Z, [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P1, REG_R3, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_A LDFF1H {.D }, /Z, [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P5, REG_R1, REG_V2, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_HW_4A_B LDFF1H {.D }, /Z, [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V1, REG_P3, REG_R4, REG_V5, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_HW_4A_C LDFF1H {.S }, /Z, [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P2, REG_R1, REG_V3, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_HW_4B LDFF1H {.D }, /Z, [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V0, REG_P2, REG_R6, REG_V1, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HW_4B_D LDFF1H {.D }, /Z, [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V5, INS_OPTS_SCALABLE_D); + /// IF_SVE_IG_4A_G LDFF1H {.H }, /Z, [{, , LSL #1}] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// 
theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldff1h, EA_SCALABLE, REG_V3, REG_P1, REG_R4, REG_ZR, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_HX_3A_E LDFF1H {.S }, /Z, [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldff1h, EA_SCALABLE, REG_V4, REG_P7, REG_V3, 6, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector LoadVectorFirstFaulting(Vector mask, half* address) => LoadVectorFirstFaulting(mask, address); + + + /// LoadVectorNonFaulting : Unextended load, non-faulting + + /// + /// svfloat16_t svldnf1[_f16](svbool_t pg, const float16_t *base) + /// LDNF1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IL_3A_B LDNF1H {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnf1h, EA_SCALABLE, REG_V1, REG_P3, REG_R2, 5, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector LoadVectorNonFaulting(half* address) => LoadVectorNonFaulting(address); + + + /// LoadVectorNonTemporal : Unextended load, non-temporal + + /// + /// svfloat16_t svldnt1[_f16](svbool_t pg, const float16_t *base) + /// LDNT1H Zresult.H, Pg/Z, [Xarray, Xindex, LSL #1] + /// LDNT1H Zresult.H, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IM_3A LDNT1H {.H }, /Z, [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_ldnt1h, EA_SCALABLE, REG_V6, REG_P7, REG_R8, 0, INS_OPTS_SCALABLE_H); + /// IF_SVE_IF_4A LDNT1H {.S }, /Z, [.S{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_V2, REG_R3, INS_OPTS_SCALABLE_S); + /// IF_SVE_IF_4A_A LDNT1H {.D }, /Z, [.D{, }] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V1, REG_P4, REG_V3, REG_R2, INS_OPTS_SCALABLE_D); + /// IF_SVE_IN_4A LDNT1H {.H }, /Z, [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_ldnt1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, REG_R5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe Vector LoadVectorNonTemporal(Vector mask, half* address) => LoadVectorNonTemporal(mask, address); + + + /// LoadVectorx2 : Load two-element tuples into two vectors + + /// + /// svfloat16x2_t svld2[_f16](svbool_t pg, const float16_t *base) + /// LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD2H {Zresult0.H, Zresult1.H}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD2H {.H, .H }, /Z, [{, #, MUL + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld2h, EA_SCALABLE, REG_V6, REG_P5, REG_R4, 8, INS_OPTS_SCALABLE_H); + /// IF_SVE_IT_4A LD2H {.H, .H }, /Z, [, , LSL + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld2h, EA_SCALABLE, REG_V8, REG_P5, REG_R9, REG_R10, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector) LoadVectorx2(Vector mask, half* address) => LoadVectorx2(mask, address); + + + /// LoadVectorx3 : Load three-element tuples into three vectors + + /// + /// svfloat16x3_t 
svld3[_f16](svbool_t pg, const float16_t *base) + /// LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD3H {Zresult0.H - Zresult2.H}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD3H {.H, .H, .H }, /Z, [{, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld3h, EA_SCALABLE, REG_V0, REG_P0, REG_R0, 21, INS_OPTS_SCALABLE_H); + /// IF_SVE_IT_4A LD3H {.H, .H, .H }, /Z, [, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld3h, EA_SCALABLE, REG_V30, REG_P2, REG_R9, REG_R4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector) LoadVectorx3(Vector mask, half* address) => LoadVectorx3(mask, address); + + + /// LoadVectorx4 : Load four-element tuples into four vectors + + /// + /// svfloat16x4_t svld4[_f16](svbool_t pg, const float16_t *base) + /// LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xarray, Xindex, LSL #1] + /// LD4H {Zresult0.H - Zresult3.H}, Pg/Z, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_IS_3A LD4H {.H, .H, .H, .H }, /Z, + /// theEmitter->emitIns_R_R_R_I(INS_sve_ld4h, EA_SCALABLE, REG_V5, REG_P4, REG_R3, -32, INS_OPTS_SCALABLE_H); + /// IF_SVE_IT_4A LD4H {.H, .H, .H, .H }, /Z, + /// theEmitter->emitIns_R_R_R_R(INS_sve_ld4h, EA_SCALABLE, REG_V13, REG_P6, REG_R5, REG_R4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// + public static unsafe (Vector, Vector, Vector, Vector) LoadVectorx4(Vector mask, half* address) => LoadVectorx4(mask, address); + + + /// Log2 : Base 2 logarithm as integer + + /// + /// svint16_t svlogb[_f16]_m(svint16_t inactive, svbool_t pg, svfloat16_t op) + /// FLOGB Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FLOGB Zresult.H, Pg/M, Zop.H + /// svint16_t svlogb[_f16]_x(svbool_t pg, svfloat16_t op) + /// FLOGB Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FLOGB Zresult.H, Pg/M, Zop.H + /// svint16_t svlogb[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; FLOGB Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HP_3A FLOGB ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_flogb, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_flogb, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R(INS_sve_flogb, EA_SCALABLE, REG_V31, REG_P7, REG_V31, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Log2(Vector value) => Log2(value); + + + /// Max : Maximum + + /// + /// svfloat16_t svmax[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMAX Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMAX Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svmax[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMAX Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// FMAX Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; FMAX Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svmax[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMAX Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMAX Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMAX ., /M, ., . 
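+ /// Illustrative contrast (comment only; the Vector<Half> instantiation is an assumption,
+ /// since this generated stub strips the generic parameters): FMAX propagates NaN operands,
+ /// whereas the FMAXNM-based MaxNumber below returns the numeric operand instead:
+ ///   Vector<Half> m = Max(left, right);   // m[i] is NaN when either input lane is NaN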
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmax, EA_SCALABLE, REG_V30, REG_P2, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_HM_2A FMAX ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fmax, EA_SCALABLE, REG_V1, REG_P0, 0.0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_F(INS_sve_fmax, EA_SCALABLE, REG_V1, REG_P0, 1.0, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Max(Vector left, Vector right) => Max(left, right); + + + /// MaxAcross : Maximum reduction to scalar + + /// + /// float16_t svmaxv[_f16](svbool_t pg, svfloat16_t op) + /// FMAXV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FMAXV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_fmaxv, EA_4BYTE, REG_V23, REG_P5, REG_V5, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxAcross(Vector value) => MaxAcross(value); + + + /// MaxNumber : Maximum number + + /// + /// svfloat16_t svmaxnm[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMAXNM Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMAXNM Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svmaxnm[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMAXNM Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// FMAXNM Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; FMAXNM Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svmaxnm[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMAXNM Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMAXNM Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMAXNM ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fmaxnm, EA_SCALABLE, REG_V31, REG_P3, REG_V4, INS_OPTS_SCALABLE_S); + /// IF_SVE_HM_2A FMAXNM ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fmaxnm, EA_SCALABLE, REG_V3, REG_P4, 0.0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_F(INS_sve_fmaxnm, EA_SCALABLE, REG_V3, REG_P4, 1.0, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxNumber(Vector left, Vector right) => MaxNumber(left, right); + + + /// MaxNumberAcross : Maximum number reduction to scalar + + /// + /// float16_t svmaxnmv[_f16](svbool_t pg, svfloat16_t op) + /// FMAXNMV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FMAXNMV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_fmaxnmv, EA_2BYTE, REG_V22, REG_P6, REG_V6, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxNumberAcross(Vector value) => MaxNumberAcross(value); + + + /// MaxNumberPairwise : Maximum number pairwise + + /// + /// svfloat16_t svmaxnmp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMAXNMP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMAXNMP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svmaxnmp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMAXNMP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMAXNMP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GR_3A FMAXNMP ., /M, ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmaxnmp, EA_SCALABLE, REG_V17, REG_P4, REG_V18, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxNumberPairwise(Vector left, Vector right) => MaxNumberPairwise(left, right); + + + /// MaxPairwise : Maximum pairwise + + /// + /// svfloat16_t svmaxp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMAXP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMAXP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svmaxp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMAXP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMAXP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GR_3A FMAXP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fmaxp, EA_SCALABLE, REG_V18, REG_P5, REG_V17, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MaxPairwise(Vector left, Vector right) => MaxPairwise(left, right); + + + /// Min : Minimum + + /// + /// svfloat16_t svmin[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMIN Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMIN Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svmin[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMIN Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// FMIN Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; FMIN Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svmin[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMIN Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMIN Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMIN ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fmin, EA_SCALABLE, REG_V0, REG_P4, REG_V3, INS_OPTS_SCALABLE_D); + /// IF_SVE_HM_2A FMIN ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fmin, EA_SCALABLE, REG_V6, REG_P5, 0.0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_F(INS_sve_fmin, EA_SCALABLE, REG_V6, REG_P5, 1.0, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Min(Vector left, Vector right) => Min(left, right); + + + /// MinAcross : Minimum reduction to scalar + + /// + /// float16_t svminv[_f16](svbool_t pg, svfloat16_t op) + /// FMINV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FMINV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_fminv, EA_4BYTE, REG_V25, REG_P3, REG_V3, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinAcross(Vector value) => MinAcross(value); + + + /// MinNumber : Minimum number + + /// + /// svfloat16_t svminnm[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMINNM Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMINNM Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svminnm[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMINNM Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// FMINNM Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; FMINNM Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svminnm[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMINNM Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMINNM Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMINNM ., /M, ., . 
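+ /// Illustrative semantics (comment only; Vector<Half> assumed): FMINNM implements the
+ /// IEEE 754 minNum operation, so a quiet NaN in one operand yields the other, numeric, operand:
+ ///   Vector<Half> m = MinNumber(left, right);   // m[i] == right[i] when left[i] is NaN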
+ /// theEmitter->emitIns_R_R_R(INS_sve_fminnm, EA_SCALABLE, REG_V1, REG_P5, REG_V2, INS_OPTS_SCALABLE_H); + /// IF_SVE_HM_2A FMINNM ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fminnm, EA_SCALABLE, REG_V2, REG_P4, 0.0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_F(INS_sve_fminnm, EA_SCALABLE, REG_V2, REG_P4, 1.0, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinNumber(Vector left, Vector right) => MinNumber(left, right); + + + /// MinNumberAcross : Minimum number reduction to scalar + + /// + /// float16_t svminnmv[_f16](svbool_t pg, svfloat16_t op) + /// FMINNMV Hresult, Pg, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HE_3A FMINNMV , , . + /// theEmitter->emitIns_R_R_R(INS_sve_fminnmv, EA_8BYTE, REG_V24, REG_P4, REG_V4, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinNumberAcross(Vector value) => MinNumberAcross(value); + + + /// MinNumberPairwise : Minimum number pairwise + + /// + /// svfloat16_t svminnmp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMINNMP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMINNMP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svminnmp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMINNMP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMINNMP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GR_3A FMINNMP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fminnmp, EA_SCALABLE, REG_V19, REG_P6, REG_V16, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinNumberPairwise(Vector left, Vector right) => MinNumberPairwise(left, right); + + + /// MinPairwise : Minimum pairwise + + /// + /// svfloat16_t svminp[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMINP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMINP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svminp[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMINP Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMINP Zresult.H, Pg/M, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_GR_3A FMINP ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fminp, EA_SCALABLE, REG_V20, REG_P7, REG_V15, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MinPairwise(Vector left, Vector right) => MinPairwise(left, right); + + + /// Multiply : Multiply + + /// + /// svfloat16_t svmul[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMUL Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMUL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svmul[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMUL Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// FMUL Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// FMUL Zresult.H, Zop1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMUL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svmul[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMUL Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMUL Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fmul, EA_SCALABLE, REG_V2, REG_P6, REG_V1, INS_OPTS_SCALABLE_S); + /// IF_SVE_HK_3A FMUL ., ., . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmul, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_GX_3A FMUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_GX_3B FMUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V1, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V3, REG_V2, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_V4, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V7, REG_V6, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HM_2A FMUL ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_P1, 0.5, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_F(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_P1, 2.0, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Multiply(Vector left, Vector right) => Multiply(left, right); + + + + + + /// MultiplyAddRotateComplex : Complex multiply-add with rotate + + /// + /// svfloat16_t svcmla[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_rotation) + /// FCMLA Ztied1.H, Pg/M, Zop2.H, Zop3.H, #imm_rotation + /// MOVPRFX Zresult, Zop1; FCMLA Zresult.H, Pg/M, Zop2.H, Zop3.H, #imm_rotation + /// svfloat16_t svcmla[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_rotation) + /// FCMLA Ztied1.H, Pg/M, Zop2.H, Zop3.H, #imm_rotation + /// MOVPRFX Zresult, Zop1; FCMLA Zresult.H, Pg/M, Zop2.H, Zop3.H, #imm_rotation + /// svfloat16_t svcmla[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_rotation) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FCMLA Zresult.H, Pg/M, Zop2.H, Zop3.H, #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_GV_3A FCMLA .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// IF_SVE_GT_4A FCMLA ., /M, ., ., + /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P1, REG_V3, REG_V4, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V0, REG_P2, REG_V1, REG_V5, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P3, REG_V0, REG_V6, 180, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P3, REG_V0, REG_V6, 270, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation) => 
MultiplyAddRotateComplex(addend, left, right, rotation); + + + /// MultiplyAddRotateComplexBySelectedScalar : Complex multiply-add with rotate + + /// + /// svfloat16_t svcmla_lane[_f16](svfloat16_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index, uint64_t imm_rotation) + /// FCMLA Ztied1.H, Zop2.H, Zop3.H[imm_index], #imm_rotation + /// MOVPRFX Zresult, Zop1; FCMLA Zresult.H, Zop2.H, Zop3.H[imm_index], #imm_rotation + /// + /// codegenarm64test: + /// IF_SVE_GV_3A FCMLA .S, .S, .S[], + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V0, REG_V1, REG_V0, 0, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_V3, REG_V5, 1, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V4, REG_V5, REG_V10, 0, 180, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I_I(INS_sve_fcmla, EA_SCALABLE, REG_V6, REG_V7, REG_V15, 1, 270, INS_OPTS_SCALABLE_S); + /// IF_SVE_GT_4A FCMLA ., /M, ., ., + /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P1, REG_V3, REG_V4, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V0, REG_P2, REG_V1, REG_V5, 90, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P3, REG_V0, REG_V6, 180, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_R_I(INS_sve_fcmla, EA_SCALABLE, REG_V2, REG_P3, REG_V0, REG_V6, 270, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation) => MultiplyAddRotateComplexBySelectedScalar(addend, left, right, rightIndex, rotation); + + + /// MultiplyAddWideningLower : Multiply-add long (bottom) + + /// + /// svfloat32_t svmlalb[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) + /// FMLALB Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; FMLALB Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_GN_3A FMLALB .H, .B, .B + /// theEmitter->emitIns_R_R_R(INS_sve_fmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// IF_SVE_GZ_3A FMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V4, 4, INS_OPTS_SCALABLE_H); + /// IF_SVE_HB_3A FMLALB .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_fmlalb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningLower(op1, op2, op3); + + /// + /// svfloat32_t svmlalb_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) + /// FMLALB Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; FMLALB Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_GN_3A FMLALB .H, .B, .B + /// theEmitter->emitIns_R_R_R(INS_sve_fmlalb, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// IF_SVE_GZ_3A FMLALB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmlalb, EA_SCALABLE, REG_V8, REG_V9, REG_V4, 4, INS_OPTS_SCALABLE_H); + /// IF_SVE_HB_3A FMLALB .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_fmlalb, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector MultiplyAddWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplyAddWideningLower(op1, op2, op3, imm_index); + + + /// 
MultiplyAddWideningUpper : Multiply-add long (top) + + /// + /// svfloat32_t svmlalt[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) + /// FMLALT Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; FMLALT Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_GN_3A FMLALT .H, .B, .B + /// theEmitter->emitIns_R_R_R(INS_sve_fmlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_B); + /// IF_SVE_GZ_3A FMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_HB_3A FMLALT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_fmlalt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplyAddWideningUpper(op1, op2, op3); + + /// + /// svfloat32_t svmlalt_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) + /// FMLALT Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; FMLALT Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_GN_3A FMLALT .H, .B, .B + /// theEmitter->emitIns_R_R_R(INS_sve_fmlalt, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_B); + /// IF_SVE_GZ_3A FMLALT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmlalt, EA_SCALABLE, REG_V10, REG_V11, REG_V5, 5, INS_OPTS_SCALABLE_H); + /// IF_SVE_HB_3A FMLALT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_fmlalt, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector MultiplyAddWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplyAddWideningUpper(op1, op2, op3, imm_index); + + + /// MultiplyBySelectedScalar : Multiply + + /// + /// svfloat16_t svmul_lane[_f16](svfloat16_t op1, svfloat16_t op2, uint64_t imm_index) + /// FMUL Zresult.H, Zop1.H, Zop2.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMUL ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fmul, EA_SCALABLE, REG_V2, REG_P6, REG_V1, INS_OPTS_SCALABLE_S); + /// IF_SVE_HK_3A FMUL ., ., . 
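+ /// Illustrative usage (comment only; Vector<Half> assumed): the lane form broadcasts a
+ /// single element of right across the whole multiply:
+ ///   Vector<Half> v = MultiplyBySelectedScalar(left, right, 3);   // v[i] == left[i] * right[3]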
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmul, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_GX_3A FMUL .S, .S, .S[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V0, REG_V2, REG_V1, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V4, REG_V6, REG_V3, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V8, REG_V10, REG_V5, 2, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V12, REG_V14, REG_V7, 3, INS_OPTS_SCALABLE_S); + /// IF_SVE_GX_3B FMUL .D, .D, .D[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V1, REG_V0, REG_V0, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V3, REG_V2, REG_V5, 1, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_V4, REG_V10, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmul, EA_SCALABLE, REG_V7, REG_V6, REG_V15, 1, INS_OPTS_SCALABLE_D); + /// IF_SVE_HM_2A FMUL ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_P1, 0.5, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_F(INS_sve_fmul, EA_SCALABLE, REG_V5, REG_P1, 2.0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex); + + + /// MultiplyExtended : Multiply extended (∞×0=2) + + /// + /// svfloat16_t svmulx[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMULX Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FMULX Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svmulx[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// FMULX Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// FMULX Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// MOVPRFX Zresult, Zop1; FMULX Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svmulx[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FMULX Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; FMULX Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FMULX ., /M, ., . 
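+ /// Illustrative semantics (comment only): FMULX matches FMUL except for the case that
+ /// names the method: (±infinity) * (±0.0) returns ±2.0 rather than NaN, which keeps
+ /// reciprocal and reciprocal-sqrt refinement sequences from poisoning a lane:
+ ///   Vector<Half> p = MultiplyExtended(left, right);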
+ /// theEmitter->emitIns_R_R_R(INS_sve_fmulx, EA_SCALABLE, REG_V3, REG_P7, REG_V0, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector MultiplyExtended(Vector left, Vector right) => MultiplyExtended(left, right); + + + + + + /// MultiplySubtractWideningLower : Multiply-subtract long (bottom) + + /// + /// svfloat32_t svmlslb[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) + /// FMLSLB Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; FMLSLB Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_GZ_3A FMLSLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V6, 6, INS_OPTS_SCALABLE_H); + /// IF_SVE_HB_3A FMLSLB .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_fmlslb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3) => MultiplySubtractWideningLower(op1, op2, op3); + + /// + /// svfloat32_t svmlslb_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) + /// FMLSLB Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; FMLSLB Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_GZ_3A FMLSLB .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmlslb, EA_SCALABLE, REG_V12, REG_V13, REG_V6, 6, INS_OPTS_SCALABLE_H); + /// IF_SVE_HB_3A FMLSLB .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_fmlslb, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector MultiplySubtractWideningLower(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplySubtractWideningLower(op1, op2, op3, imm_index); + + + /// MultiplySubtractWideningUpper : Multiply-subtract long (top) + + /// + /// svfloat32_t svmlslt[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3) + /// FMLSLT Ztied1.S, Zop2.H, Zop3.H + /// MOVPRFX Zresult, Zop1; FMLSLT Zresult.S, Zop2.H, Zop3.H + /// + /// codegenarm64test: + /// IF_SVE_GZ_3A FMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_HB_3A FMLSLT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_fmlslt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3) => MultiplySubtractWideningUpper(op1, op2, op3); + + /// + /// svfloat32_t svmlslt_lane[_f32](svfloat32_t op1, svfloat16_t op2, svfloat16_t op3, uint64_t imm_index) + /// FMLSLT Ztied1.S, Zop2.H, Zop3.H[imm_index] + /// MOVPRFX Zresult, Zop1; FMLSLT Zresult.S, Zop2.H, Zop3.H[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_GZ_3A FMLSLT .S, .H, .H[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_fmlslt, EA_SCALABLE, REG_V14, REG_V15, REG_V7, 7, INS_OPTS_SCALABLE_H); + /// IF_SVE_HB_3A FMLSLT .S, .H, .H + /// theEmitter->emitIns_R_R_R(INS_sve_fmlslt, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector MultiplySubtractWideningUpper(Vector op1, Vector op2, Vector op3, ulong imm_index) => MultiplySubtractWideningUpper(op1, op2, op3, imm_index); + + + /// Negate : Negate + + /// + /// svfloat16_t svneg[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// FNEG Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FNEG Zresult.H, Pg/M, Zop.H + /// svfloat16_t svneg[_f16]_x(svbool_t pg, svfloat16_t op) + /// FNEG Ztied.H, Pg/M, 
Ztied.H + /// MOVPRFX Zresult, Zop; FNEG Zresult.H, Pg/M, Zop.H + /// svfloat16_t svneg[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; FNEG Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AP_3A FNEG ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_fneg, EA_SCALABLE, REG_V26, REG_P5, REG_V5, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Negate(Vector value) => Negate(value); + + + + + /// PopCount : Count nonzero bits + + /// + /// svuint16_t svcnt[_f16]_m(svuint16_t inactive, svbool_t pg, svfloat16_t op) + /// CNT Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; CNT Zresult.H, Pg/M, Zop.H + /// svuint16_t svcnt[_f16]_x(svbool_t pg, svfloat16_t op) + /// CNT Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; CNT Zresult.H, Pg/M, Zop.H + /// svuint16_t svcnt[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; CNT Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_AP_3A CNT ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_cnt, EA_SCALABLE, REG_V28, REG_P3, REG_V3, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector PopCount(Vector value) => PopCount(value); + + + /// ReciprocalEstimate : Reciprocal estimate + + /// + /// svfloat16_t svrecpe[_f16](svfloat16_t op) + /// FRECPE Zresult.H, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HF_2A FRECPE ., . + /// theEmitter->emitIns_R_R(INS_sve_frecpe, EA_SCALABLE, REG_V0, REG_V2, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector ReciprocalEstimate(Vector value) => ReciprocalEstimate(value); + + + /// ReciprocalExponent : Reciprocal exponent + + /// + /// svfloat16_t svrecpx[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// FRECPX Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FRECPX Zresult.H, Pg/M, Zop.H + /// svfloat16_t svrecpx[_f16]_x(svbool_t pg, svfloat16_t op) + /// FRECPX Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FRECPX Zresult.H, Pg/M, Zop.H + /// svfloat16_t svrecpx[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; FRECPX Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HR_3A FRECPX ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frecpx, EA_SCALABLE, REG_V5, REG_P5, REG_V5, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector ReciprocalExponent(Vector value) => ReciprocalExponent(value); + + + /// ReciprocalSqrtEstimate : Reciprocal square root estimate + + /// + /// svfloat16_t svrsqrte[_f16](svfloat16_t op) + /// FRSQRTE Zresult.H, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HF_2A FRSQRTE ., . + /// theEmitter->emitIns_R_R(INS_sve_frsqrte, EA_SCALABLE, REG_V5, REG_V3, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_frsqrte, EA_SCALABLE, REG_V9, REG_V5, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector ReciprocalSqrtEstimate(Vector value) => ReciprocalSqrtEstimate(value); + + + /// ReciprocalSqrtStep : Reciprocal square root step + + /// + /// svfloat16_t svrsqrts[_f16](svfloat16_t op1, svfloat16_t op2) + /// FRSQRTS Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_HK_3A FRSQRTS ., ., . 
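+ /// Illustrative usage (comment only; Vector<Half> assumed): FRSQRTS computes
+ /// (3 - a*b) / 2, the correction factor for one Newton-Raphson refinement of
+ /// ReciprocalSqrtEstimate:
+ ///   Vector<Half> y = ReciprocalSqrtEstimate(x);
+ ///   y = Multiply(y, ReciprocalSqrtStep(Multiply(x, y), y));   // one refinement step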
+ /// theEmitter->emitIns_R_R_R(INS_sve_frsqrts, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReciprocalSqrtStep(Vector left, Vector right) => ReciprocalSqrtStep(left, right); + + + /// ReciprocalStep : Reciprocal step + + /// + /// svfloat16_t svrecps[_f16](svfloat16_t op1, svfloat16_t op2) + /// FRECPS Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_HK_3A FRECPS ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_frecps, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReciprocalStep(Vector left, Vector right) => ReciprocalStep(left, right); + + + /// ReverseElement : Reverse all elements + + /// + /// svfloat16_t svrev[_f16](svfloat16_t op) + /// REV Zresult.H, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_CJ_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P1, REG_P2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P4, REG_P5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P3, REG_P7, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_P0, REG_P6, INS_OPTS_SCALABLE_D); + /// IF_SVE_CG_2A REV ., . + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V3, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V4, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V7, REG_V1, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R(INS_sve_rev, EA_SCALABLE, REG_V2, REG_V5, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector ReverseElement(Vector value) => ReverseElement(value); + + + /// RoundAwayFromZero : Round to nearest, ties away from zero + + /// + /// svfloat16_t svrinta[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// FRINTA Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FRINTA Zresult.H, Pg/M, Zop.H + /// svfloat16_t svrinta[_f16]_x(svbool_t pg, svfloat16_t op) + /// FRINTA Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FRINTA Zresult.H, Pg/M, Zop.H + /// svfloat16_t svrinta[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; FRINTA Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTA ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frinta, EA_SCALABLE, REG_V26, REG_P7, REG_V2, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundAwayFromZero(Vector value) => RoundAwayFromZero(value); + + + /// RoundToNearest : Round to nearest, ties to even + + /// + /// svfloat16_t svrintn[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// FRINTN Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FRINTN Zresult.H, Pg/M, Zop.H + /// svfloat16_t svrintn[_f16]_x(svbool_t pg, svfloat16_t op) + /// FRINTN Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FRINTN Zresult.H, Pg/M, Zop.H + /// svfloat16_t svrintn[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; FRINTN Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTN ., /M, . 
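+ /// Illustrative semantics (comment only): FRINTN rounds to the nearest integral value
+ /// with ties going to the even choice, e.g. 0.5 -> 0.0 and 1.5 -> 2.0:
+ ///   Vector<Half> r = RoundToNearest(value);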
+ /// theEmitter->emitIns_R_R_R(INS_sve_frintn, EA_SCALABLE, REG_V29, REG_P4, REG_V10, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundToNearest(Vector value) => RoundToNearest(value); + + + /// RoundToNegativeInfinity : Round towards -∞ + + /// + /// svfloat16_t svrintm[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// FRINTM Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FRINTM Zresult.H, Pg/M, Zop.H + /// svfloat16_t svrintm[_f16]_x(svbool_t pg, svfloat16_t op) + /// FRINTM Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FRINTM Zresult.H, Pg/M, Zop.H + /// svfloat16_t svrintm[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; FRINTM Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTM ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frintm, EA_SCALABLE, REG_V28, REG_P5, REG_V0, INS_OPTS_SCALABLE_D); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundToNegativeInfinity(Vector value) => RoundToNegativeInfinity(value); + + + /// RoundToPositiveInfinity : Round towards +∞ + + /// + /// svfloat16_t svrintp[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// FRINTP Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FRINTP Zresult.H, Pg/M, Zop.H + /// svfloat16_t svrintp[_f16]_x(svbool_t pg, svfloat16_t op) + /// FRINTP Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FRINTP Zresult.H, Pg/M, Zop.H + /// svfloat16_t svrintp[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; FRINTP Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTP ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frintp, EA_SCALABLE, REG_V30, REG_P3, REG_V11, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundToPositiveInfinity(Vector value) => RoundToPositiveInfinity(value); + + + /// RoundToZero : Round towards zero + + /// + /// svfloat16_t svrintz[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// FRINTZ Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FRINTZ Zresult.H, Pg/M, Zop.H + /// svfloat16_t svrintz[_f16]_x(svbool_t pg, svfloat16_t op) + /// FRINTZ Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FRINTZ Zresult.H, Pg/M, Zop.H + /// svfloat16_t svrintz[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; FRINTZ Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HQ_3A FRINTZ ., /M, . + /// theEmitter->emitIns_R_R_R(INS_sve_frintz, EA_SCALABLE, REG_V0, REG_P0, REG_V13, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector RoundToZero(Vector value) => RoundToZero(value); + + + + + /// Scale : Adjust exponent + + /// + /// svfloat16_t svscale[_f16]_m(svbool_t pg, svfloat16_t op1, svint16_t op2) + /// FSCALE Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FSCALE Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svscale[_f16]_x(svbool_t pg, svfloat16_t op1, svint16_t op2) + /// FSCALE Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FSCALE Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svscale[_f16]_z(svbool_t pg, svfloat16_t op1, svint16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FSCALE Zresult.H, Pg/M, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FSCALE ., /M, ., . 
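+ /// Illustrative semantics (comment only; the Vector<Half>/Vector<short> pairing is an
+ /// assumption): FSCALE is a vector ldexp, multiplying each element by a power of two:
+ ///   Vector<Half> s = Scale(values, exponents);   // s[i] == values[i] * 2^exponents[i]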
+ /// theEmitter->emitIns_R_R_R(INS_sve_fscale, EA_SCALABLE, REG_V4, REG_P6, REG_V31, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Scale(Vector left, Vector right) => Scale(left, right); + + + /// Splice : Splice two vectors under predicate control + + /// + /// svfloat16_t svsplice[_f16](svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// SPLICE Ztied1.H, Pg, Ztied1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; SPLICE Zresult.H, Pg, Zresult.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_CV_3A SPLICE ., , {., .} + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V0, REG_P0, REG_V30, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V3, REG_P7, REG_V27, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// IF_SVE_CV_3B SPLICE ., , ., . + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V1, REG_P1, REG_V29, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R(INS_sve_splice, EA_SCALABLE, REG_V2, REG_P6, REG_V28, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector Splice(Vector mask, Vector left, Vector right) => Splice(mask, left, right); + + + /// Sqrt : Square root + + /// + /// svfloat16_t svsqrt[_f16]_m(svfloat16_t inactive, svbool_t pg, svfloat16_t op) + /// FSQRT Ztied.H, Pg/M, Zop.H + /// MOVPRFX Zresult, Zinactive; FSQRT Zresult.H, Pg/M, Zop.H + /// svfloat16_t svsqrt[_f16]_x(svbool_t pg, svfloat16_t op) + /// FSQRT Ztied.H, Pg/M, Ztied.H + /// MOVPRFX Zresult, Zop; FSQRT Zresult.H, Pg/M, Zop.H + /// svfloat16_t svsqrt[_f16]_z(svbool_t pg, svfloat16_t op) + /// MOVPRFX Zresult.H, Pg/Z, Zop.H; FSQRT Zresult.H, Pg/M, Zop.H + /// + /// codegenarm64test: + /// IF_SVE_HR_3A FSQRT ., /M, . 
+ /// theEmitter->emitIns_R_R_R(INS_sve_fsqrt, EA_SCALABLE, REG_V6, REG_P6, REG_V6, INS_OPTS_SCALABLE_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Sqrt(Vector value) => Sqrt(value); + + + /// Store : Non-truncating store + + /// + /// void svst1[_f16](svbool_t pg, float16_t *base, svfloat16_t data) + /// ST1H Zdata.H, Pg, [Xarray, Xindex, LSL #1] + /// ST1H Zdata.H, Pg, [Xbase, #0, MUL VL] + /// + /// codegenarm64test: + /// IF_SVE_JD_4A ST1H {.}, , [, , LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P6, REG_R1, REG_R2, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_R4, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_LSL_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P2, REG_R4, REG_R0, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4A ST1H {.S }, , [, .S, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V3, REG_P1, REG_R5, REG_V4, INS_OPTS_SCALABLE_S_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_B ST1H {.D }, , [, .D, #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_UXTW, INS_SCALABLE_OPTS_MOD_N); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V2, REG_P3, REG_R1, REG_V4, INS_OPTS_SCALABLE_D_SXTW, INS_SCALABLE_OPTS_MOD_N); + /// IF_SVE_JJ_4A_C ST1H {.D }, , [, .D, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P5, REG_R1, REG_V3, INS_OPTS_SCALABLE_D_SXTW); + /// IF_SVE_JJ_4A_D ST1H {.S }, , [, .S, ] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_UXTW); + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V7, REG_P5, REG_R4, REG_V1, INS_OPTS_SCALABLE_S_SXTW); + /// IF_SVE_JN_3A ST1H {.}, , [{, #, MUL VL}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 3, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V0, REG_P3, REG_R4, -2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JJ_4B ST1H {.D }, , [, .D, LSL #1] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, REG_V4, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_LSL_N); + /// IF_SVE_JJ_4B_E ST1H {.D }, , [, .D] + /// theEmitter->emitIns_R_R_R_R(INS_sve_st1h, EA_SCALABLE, REG_V1, REG_P4, REG_R3, REG_V2, INS_OPTS_SCALABLE_D); + /// IF_SVE_JI_3A_A ST1H {.S }, , [.S{, #}] + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 0, INS_OPTS_SCALABLE_D); + /// theEmitter->emitIns_R_R_R_I(INS_sve_st1h, EA_SCALABLE, REG_V5, REG_P3, REG_V2, 62, INS_OPTS_SCALABLE_D); + /// + public static unsafe void Store(Vector mask, half* address, Vector data) => Store(mask, address, data); + + /// + /// void svst2[_f16](svbool_t pg, float16_t *base, 
svfloat16x2_t data)
+ /// ST2H {Zdata0.H, Zdata1.H}, Pg, [Xarray, Xindex, LSL #1]
+ /// ST2H {Zdata0.H, Zdata1.H}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST2H {.H, .H }, , [{, #, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st2h, EA_SCALABLE, REG_V6, REG_P7, REG_R8, -16, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_JC_4A ST2H {.H, .H }, , [, , LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st2h, EA_SCALABLE, REG_V2, REG_P3, REG_R5, REG_R6, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ ///
+ public static unsafe void Store(Vector mask, half* address, (Vector Value1, Vector Value2) data) => Store(mask, address, data);
+
+ ///
+ /// void svst3[_f16](svbool_t pg, float16_t *base, svfloat16x3_t data)
+ /// ST3H {Zdata0.H - Zdata2.H}, Pg, [Xarray, Xindex, LSL #1]
+ /// ST3H {Zdata0.H - Zdata2.H}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST3H {.H, .H, .H }, , [{, #,
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st3h, EA_SCALABLE, REG_V1, REG_P2, REG_R3, -24, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_JC_4A ST3H {.H, .H, .H }, , [, ,
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st3h, EA_SCALABLE, REG_V1, REG_P0, REG_R3, REG_R8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ ///
+ public static unsafe void Store(Vector mask, half* address, (Vector Value1, Vector Value2, Vector Value3) data) => Store(mask, address, data);
+
+ ///
+ /// void svst4[_f16](svbool_t pg, float16_t *base, svfloat16x4_t data)
+ /// ST4H {Zdata0.H - Zdata3.H}, Pg, [Xarray, Xindex, LSL #1]
+ /// ST4H {Zdata0.H - Zdata3.H}, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JO_3A ST4H {.H, .H, .H, .H }, , [{,
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_st4h, EA_SCALABLE, REG_V3, REG_P5, REG_R2, -32, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_JC_4A ST4H {.H, .H, .H, .H }, ,
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_st4h, EA_SCALABLE, REG_V1, REG_P0, REG_R9, REG_R8, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ ///
+ public static unsafe void Store(Vector mask, half* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data) => Store(mask, address, data);
+
+
+ /// StoreNonTemporal : Non-truncating store, non-temporal
+
+ ///
+ /// void svstnt1[_f16](svbool_t pg, float16_t *base, svfloat16_t data)
+ /// STNT1H Zdata.H, Pg, [Xarray, Xindex, LSL #1]
+ /// STNT1H Zdata.H, Pg, [Xbase, #0, MUL VL]
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_JM_3A STNT1H {.H }, , [{, #, MUL VL}]
+ /// theEmitter->emitIns_R_R_R_I(INS_sve_stnt1h, EA_SCALABLE, REG_V9, REG_P1, REG_R0, -5, INS_OPTS_SCALABLE_H);
+ /// IF_SVE_IZ_4A STNT1H {.S }, , [.S{, }]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V2, REG_P7, REG_V6, REG_R5, INS_OPTS_SCALABLE_S);
+ /// IF_SVE_IZ_4A_A STNT1H {.D }, , [.D{, }]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V5, REG_P3, REG_V1, REG_R2, INS_OPTS_SCALABLE_D);
+ /// IF_SVE_JB_4A STNT1H {.H }, , [, , LSL #1]
+ /// theEmitter->emitIns_R_R_R_R(INS_sve_stnt1h, EA_SCALABLE, REG_V0, REG_P1, REG_R2, REG_R3, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_LSL_N);
+ ///
+ public static unsafe void StoreNonTemporal(Vector mask, half* address, Vector data) => StoreNonTemporal(mask, address, data);
+
+
+ /// Subtract : Subtract
+
+ ///
+ /// svfloat16_t svsub[_f16]_m(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+ /// FSUB Ztied1.H, Pg/M, Ztied1.H, Zop2.H
+ /// MOVPRFX Zresult, Zop1; FSUB Zresult.H, Pg/M, Zresult.H, Zop2.H
+ /// svfloat16_t svsub[_f16]_x(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
+ /// FSUB
Ztied1.H, Pg/M, Ztied1.H, Zop2.H + /// FSUBR Ztied2.H, Pg/M, Ztied2.H, Zop1.H + /// FSUB Zresult.H, Zop1.H, Zop2.H + /// MOVPRFX Zresult, Zop1; FSUB Zresult.H, Pg/M, Zresult.H, Zop2.H + /// svfloat16_t svsub[_f16]_z(svbool_t pg, svfloat16_t op1, svfloat16_t op2) + /// MOVPRFX Zresult.H, Pg/Z, Zop1.H; FSUB Zresult.H, Pg/M, Zresult.H, Zop2.H + /// MOVPRFX Zresult.H, Pg/Z, Zop2.H; FSUBR Zresult.H, Pg/M, Zresult.H, Zop1.H + /// + /// codegenarm64test: + /// IF_SVE_HL_3A FSUB ., /M, ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fsub, EA_SCALABLE, REG_V5, REG_P5, REG_V30, INS_OPTS_SCALABLE_S); + /// IF_SVE_HK_3A FSUB ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_fsub, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_HM_2A FSUB ., /M, ., + /// theEmitter->emitIns_R_R_F(INS_sve_fsub, EA_SCALABLE, REG_V7, REG_P2, 0.5, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_F(INS_sve_fsub, EA_SCALABLE, REG_V7, REG_P2, 1.0, INS_OPTS_SCALABLE_H); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector Subtract(Vector left, Vector right) => Subtract(left, right); + + + + /// TransposeEven : Interleave even elements from two inputs + + /// + /// svfloat16_t svtrn1[_f16](svfloat16_t op1, svfloat16_t op2) + /// TRN1 Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn1, EA_SCALABLE, REG_P1, REG_P3, REG_P4, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector TransposeEven(Vector left, Vector right) => TransposeEven(left, right); + + + /// TransposeOdd : Interleave odd elements from two inputs + + /// + /// svfloat16_t svtrn2[_f16](svfloat16_t op1, svfloat16_t op2) + /// TRN2 Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A TRN2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B TRN2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A TRN2 ., ., . 
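+ /// Illustrative semantics (comment only; Vector<Half> assumed): TRN2 interleaves the
+ /// odd-numbered elements of its two inputs:
+ ///   Vector<Half> r = TransposeOdd(left, right);   // { left[1], right[1], left[3], right[3], ... }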
+ /// theEmitter->emitIns_R_R_R(INS_sve_trn2, EA_SCALABLE, REG_P5, REG_P2, REG_P7, INS_OPTS_SCALABLE_H); + /// + public static unsafe Vector TransposeOdd(Vector left, Vector right) => TransposeOdd(left, right); + + + /// TrigonometricMultiplyAddCoefficient : Trigonometric multiply-add coefficient + + /// + /// svfloat16_t svtmad[_f16](svfloat16_t op1, svfloat16_t op2, uint64_t imm3) + /// FTMAD Ztied1.H, Ztied1.H, Zop2.H, #imm3 + /// MOVPRFX Zresult, Zop1; FTMAD Zresult.H, Zresult.H, Zop2.H, #imm3 + /// + /// codegenarm64test: + /// IF_SVE_HN_2A FTMAD ., ., ., # + /// theEmitter->emitIns_R_R_I(INS_sve_ftmad, EA_SCALABLE, REG_V0, REG_V2, 0, INS_OPTS_SCALABLE_H); + /// theEmitter->emitIns_R_R_I(INS_sve_ftmad, EA_SCALABLE, REG_V3, REG_V5, 1, INS_OPTS_SCALABLE_S); + /// theEmitter->emitIns_R_R_I(INS_sve_ftmad, EA_SCALABLE, REG_V4, REG_V2, 7, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector TrigonometricMultiplyAddCoefficient(Vector left, Vector right, [ConstantExpected] byte control) => TrigonometricMultiplyAddCoefficient(left, right, control); + + + /// TrigonometricSelectCoefficient : Trigonometric select coefficient + + /// + /// svfloat16_t svtssel[_f16](svfloat16_t op1, svuint16_t op2) + /// FTSSEL Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BK_3A FTSSEL ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ftssel, EA_SCALABLE, REG_V17, REG_V16, REG_V15, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector TrigonometricSelectCoefficient(Vector value, Vector selector) => TrigonometricSelectCoefficient(value, selector); + + + /// TrigonometricStartingValue : Trigonometric starting value + + /// + /// svfloat16_t svtsmul[_f16](svfloat16_t op1, svuint16_t op2) + /// FTSMUL Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_HK_3A FTSMUL ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_ftsmul, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// + public static unsafe Vector TrigonometricStartingValue(Vector value, Vector sign) => TrigonometricStartingValue(value, sign); + + + /// UnzipEven : Concatenate even elements from two inputs + + /// + /// svfloat16_t svuzp1[_f16](svfloat16_t op1, svfloat16_t op2) + /// UZP1 Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP1 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP1 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector UnzipEven(Vector left, Vector right) => UnzipEven(left, right); + + + /// UnzipOdd : Concatenate odd elements from two inputs + + /// + /// svfloat16_t svuzp2[_f16](svfloat16_t op1, svfloat16_t op2) + /// UZP2 Zresult.H, Zop1.H, Zop2.H + /// + /// codegenarm64test: + /// IF_SVE_BR_3A UZP2 ., ., . 
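+ /// Illustrative usage (comment only; Vector<Half> assumed): UZP1/UZP2 deinterleave
+ /// paired data, e.g. splitting interleaved complex values into their components:
+ ///   Vector<Half> re = UnzipEven(a, b);   // even-numbered elements of a, then of b
+ ///   Vector<Half> im = UnzipOdd(a, b);    // odd-numbered elements of a, then of b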
+ /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V18, REG_V19, REG_V20, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED); + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V21, REG_V22, REG_V23, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_BR_3B UZP2 .Q, .Q, .Q + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED); + /// IF_SVE_CI_3A UZP2 ., ., . + /// theEmitter->emitIns_R_R_R(INS_sve_uzp2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector UnzipOdd(Vector left, Vector right) => UnzipOdd(left, right); + + + /// UpConvertWideningUpper : Up convert long (top) + + /// + /// svfloat32_t svcvtlt_f32[_f16]_m(svfloat32_t inactive, svbool_t pg, svfloat16_t op) + /// FCVTLT Ztied.S, Pg/M, Zop.H + /// svfloat32_t svcvtlt_f32[_f16]_x(svbool_t pg, svfloat16_t op) + /// FCVTLT Ztied.S, Pg/M, Ztied.H + /// + /// codegenarm64test: + /// IF_SVE_GQ_3A FCVTLT .D, /M, .S + /// theEmitter->emitIns_R_R_R(INS_sve_fcvtlt, EA_SCALABLE, REG_V0, REG_P7, REG_V1, INS_OPTS_S_TO_D); + /// theEmitter->emitIns_R_R_R(INS_sve_fcvtlt, EA_SCALABLE, REG_V14, REG_P7, REG_V20, INS_OPTS_H_TO_S); + /// + /// Embedded arg1 mask predicate + /// + public static unsafe Vector UpConvertWideningUpper(Vector value) => UpConvertWideningUpper(value); + + + /// VectorTableLookup : Table lookup in single-vector table + + /// + /// svfloat16_t svtbl[_f16](svfloat16_t data, svuint16_t indices) + /// TBL Zresult.H, Zdata.H, Zindices.H + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR); + /// + public static unsafe Vector VectorTableLookup(Vector data, Vector indices) => VectorTableLookup(data, indices); + + /// + /// svfloat16_t svtbl2[_f16](svfloat16x2_t data, svuint16_t indices) + /// TBL Zresult.H, {Zdata0.H, Zdata1.H}, Zindices.H + /// + /// codegenarm64test: + /// IF_SVE_BZ_3A TBL ., {.}, . + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H); + /// IF_SVE_BZ_3A_A TBL ., {., .}, . 
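+ /// Illustrative semantics (comment only; Vector<Half>/Vector<ushort> assumed, and lo/hi
+ /// are hypothetical halves of the table): the pair acts as one two-vector table, and an
+ /// index past the combined element count zeroes that lane:
+ ///   Vector<Half> r = VectorTableLookup((lo, hi), indices);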
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbl, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_WITH_VECTOR_PAIR);
+ ///
+ public static unsafe Vector VectorTableLookup((Vector data1, Vector data2) data, Vector indices) => VectorTableLookup(data, indices);
+
+
+ /// VectorTableLookupExtension : Table lookup in single-vector table (merging)
+
+ ///
+ /// svfloat16_t svtbx[_f16](svfloat16_t fallback, svfloat16_t data, svuint16_t indices)
+ /// TBX Ztied.H, Zdata.H, Zindices.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BZ_3A TBX ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S);
+ /// theEmitter->emitIns_R_R_R(INS_sve_tbx, EA_SCALABLE, REG_V9, REG_V10, REG_V11, INS_OPTS_SCALABLE_D);
+ ///
+ public static unsafe Vector VectorTableLookupExtension(Vector fallback, Vector data, Vector indices) => VectorTableLookupExtension(fallback, data, indices);
+
+
+ /// ZipHigh : Interleave elements from high halves of two inputs
+
+ ///
+ /// svfloat16_t svzip2[_f16](svfloat16_t op1, svfloat16_t op2)
+ /// ZIP2 Zresult.H, Zop1.H, Zop2.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BR_3A ZIP2 ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V30, REG_V31, REG_V0, INS_OPTS_SCALABLE_S, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V1, REG_V2, REG_V3, INS_OPTS_SCALABLE_D, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_BR_3B ZIP2 .Q, .Q, .Q
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_V15, REG_V16, REG_V17, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_CI_3A ZIP2 ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip2, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_H);
+ ///
+ public static unsafe Vector ZipHigh(Vector left, Vector right) => ZipHigh(left, right);
+
+
+ /// ZipLow : Interleave elements from low halves of two inputs
+
+ ///
+ /// svfloat16_t svzip1[_f16](svfloat16_t op1, svfloat16_t op2)
+ /// ZIP1 Zresult.H, Zop1.H, Zop2.H
+ ///
+ /// codegenarm64test:
+ /// IF_SVE_BR_3A ZIP1 ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V24, REG_V25, REG_V26, INS_OPTS_SCALABLE_B, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V27, REG_V28, REG_V29, INS_OPTS_SCALABLE_H, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_BR_3B ZIP1 .Q, .Q, .Q
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_V12, REG_V13, REG_V14, INS_OPTS_SCALABLE_Q, INS_SCALABLE_OPTS_UNPREDICATED);
+ /// IF_SVE_CI_3A ZIP1 ., ., .
+ /// theEmitter->emitIns_R_R_R(INS_sve_zip1, EA_SCALABLE, REG_P0, REG_P0, REG_P0, INS_OPTS_SCALABLE_B);
+ ///
+ public static unsafe Vector ZipLow(Vector left, Vector right) => ZipLow(left, right);
+
+ }
+}
+
diff --git a/sve_api/out_helper_api/SveI8mm.cs b/sve_api/out_helper_api/SveI8mm.cs
new file mode 100644
index 0000000000000..a29931e9dd287
--- /dev/null
+++ b/sve_api/out_helper_api/SveI8mm.cs
@@ -0,0 +1,130 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license. + +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveI8mm : AdvSimd + { + internal SveI8mm() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// DotProductSignedUnsigned : Dot product (signed × unsigned) + + /// + /// svint32_t svsudot[_s32](svint32_t op1, svint8_t op2, svuint8_t op3) + /// USDOT Ztied1.S, Zop3.B, Zop2.B + /// MOVPRFX Zresult, Zop1; USDOT Zresult.S, Zop3.B, Zop2.B + /// + /// codegenarm64test: + /// IF_SVE_EZ_3A USDOT .S, .B, .B[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_usdot, EA_SCALABLE, REG_V21, REG_V22, REG_V2, 2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_usdot, EA_SCALABLE, REG_V23, REG_V24, REG_V3, 3, INS_OPTS_SCALABLE_B); + /// IF_SVE_EI_3A USDOT .S, .B, .B + /// theEmitter->emitIns_R_R_R(INS_sve_usdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector DotProductSignedUnsigned(Vector op1, Vector op2, Vector op3) => DotProductSignedUnsigned(op1, op2, op3); + + /// + /// svint32_t svsudot_lane[_s32](svint32_t op1, svint8_t op2, svuint8_t op3, uint64_t imm_index) + /// SUDOT Ztied1.S, Zop2.B, Zop3.B[imm_index] + /// MOVPRFX Zresult, Zop1; SUDOT Zresult.S, Zop2.B, Zop3.B[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EZ_3A SUDOT .S, .B, .B[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_sudot, EA_SCALABLE, REG_V17, REG_V18, REG_V0, 0, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_sudot, EA_SCALABLE, REG_V19, REG_V20, REG_V1, 1, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector DotProductSignedUnsigned(Vector op1, Vector op2, Vector op3, ulong imm_index) => DotProductSignedUnsigned(op1, op2, op3, imm_index); + + + /// DotProductUnsignedSigned : Dot product (unsigned × signed) + + /// + /// svint32_t svusdot[_s32](svint32_t op1, svuint8_t op2, svint8_t op3) + /// USDOT Ztied1.S, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; USDOT Zresult.S, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_EZ_3A USDOT .S, .B, .B[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_usdot, EA_SCALABLE, REG_V21, REG_V22, REG_V2, 2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_usdot, EA_SCALABLE, REG_V23, REG_V24, REG_V3, 3, INS_OPTS_SCALABLE_B); + /// IF_SVE_EI_3A USDOT .S, .B, .B + /// theEmitter->emitIns_R_R_R(INS_sve_usdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector DotProductUnsignedSigned(Vector op1, Vector op2, Vector op3) => DotProductUnsignedSigned(op1, op2, op3); + + /// + /// svint32_t svusdot_lane[_s32](svint32_t op1, svuint8_t op2, svint8_t op3, uint64_t imm_index) + /// USDOT Ztied1.S, Zop2.B, Zop3.B[imm_index] + /// MOVPRFX Zresult, Zop1; USDOT Zresult.S, Zop2.B, Zop3.B[imm_index] + /// + /// codegenarm64test: + /// IF_SVE_EZ_3A USDOT .S, .B, .B[] + /// theEmitter->emitIns_R_R_R_I(INS_sve_usdot, EA_SCALABLE, REG_V21, REG_V22, REG_V2, 2, INS_OPTS_SCALABLE_B); + /// theEmitter->emitIns_R_R_R_I(INS_sve_usdot, EA_SCALABLE, REG_V23, REG_V24, REG_V3, 3, INS_OPTS_SCALABLE_B); + /// 
IF_SVE_EI_3A USDOT .S, .B, .B + /// theEmitter->emitIns_R_R_R(INS_sve_usdot, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_B); + /// + public static unsafe Vector DotProductUnsignedSigned(Vector op1, Vector op2, Vector op3, ulong imm_index) => DotProductUnsignedSigned(op1, op2, op3, imm_index); + + + /// MatrixMultiplyAccumulate : Matrix multiply-accumulate + + /// + /// svint32_t svmmla[_s32](svint32_t op1, svint8_t op2, svint8_t op3) + /// SMMLA Ztied1.S, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; SMMLA Zresult.S, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_FO_3A SMMLA .S, .B, .B + /// theEmitter->emitIns_R_R_R(INS_sve_smmla, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3) => MatrixMultiplyAccumulate(op1, op2, op3); + + /// + /// svuint32_t svmmla[_u32](svuint32_t op1, svuint8_t op2, svuint8_t op3) + /// UMMLA Ztied1.S, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; UMMLA Zresult.S, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_FO_3A UMMLA .S, .B, .B + /// theEmitter->emitIns_R_R_R(INS_sve_ummla, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MatrixMultiplyAccumulate(Vector op1, Vector op2, Vector op3) => MatrixMultiplyAccumulate(op1, op2, op3); + + + /// MatrixMultiplyAccumulateUnsignedSigned : Matrix multiply-accumulate (unsigned × signed) + + /// + /// svint32_t svusmmla[_s32](svint32_t op1, svuint8_t op2, svint8_t op3) + /// USMMLA Ztied1.S, Zop2.B, Zop3.B + /// MOVPRFX Zresult, Zop1; USMMLA Zresult.S, Zop2.B, Zop3.B + /// + /// codegenarm64test: + /// IF_SVE_FO_3A USMMLA .S, .B, .B + /// theEmitter->emitIns_R_R_R(INS_sve_usmmla, EA_SCALABLE, REG_V6, REG_V7, REG_V8, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector MatrixMultiplyAccumulateUnsignedSigned(Vector op1, Vector op2, Vector op3) => MatrixMultiplyAccumulateUnsignedSigned(op1, op2, op3); + + } +} + diff --git a/sve_api/out_helper_api/SveNone.cs b/sve_api/out_helper_api/SveNone.cs new file mode 100644 index 0000000000000..dcf166fdcb5e4 --- /dev/null +++ b/sve_api/out_helper_api/SveNone.cs @@ -0,0 +1,2288 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
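A note on the tuple helpers that make up most of SveNone below: the generated svset-style (ChangeOneVectorInATuple...) and svget-style (ExtractOneVectorFromATuple...) stubs take an unnamed tuple parameter and forward it as "tuple1,,", which is not valid C#. A minimal hand-corrected sketch of the intended shape, assuming a named tuple parameter and Vector<int> as a representative element type (both are assumptions for illustration; the generator emits one such overload per element type, and the same correction applies to the three- and four-vector variants):

    using System.Numerics;

    // Sketch only, as it would read inside SveNone: name the tuple parameter and
    // forward it whole, replacing the generated "tuple1,," expansion while keeping
    // the file's recursive intrinsic-stub pattern.
    public static unsafe (Vector<int>, Vector<int>) ChangeOneVectorInATupleOfTwoVectors(
        (Vector<int> tuple1, Vector<int> tuple2) tuple, ulong imm_index, Vector<int> x)
        => ChangeOneVectorInATupleOfTwoVectors(tuple, imm_index, x);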
+ +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveNone : AdvSimd + { + internal SveNone() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// ChangeOneVectorInATupleOfFourVectors : Change one vector in a tuple of four vectors + + /// + /// svint8x4_t svset4[_s8](svint8x4_t tuple, uint64_t imm_index, svint8_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfFourVectors(tuple1,, imm_index, x); + + /// + /// svint16x4_t svset4[_s16](svint16x4_t tuple, uint64_t imm_index, svint16_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfFourVectors(tuple1,, imm_index, x); + + /// + /// svint32x4_t svset4[_s32](svint32x4_t tuple, uint64_t imm_index, svint32_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfFourVectors(tuple1,, imm_index, x); + + /// + /// svint64x4_t svset4[_s64](svint64x4_t tuple, uint64_t imm_index, svint64_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfFourVectors(tuple1,, imm_index, x); + + /// + /// svuint8x4_t svset4[_u8](svuint8x4_t tuple, uint64_t imm_index, svuint8_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfFourVectors(tuple1,, imm_index, x); + + /// + /// svuint16x4_t svset4[_u16](svuint16x4_t tuple, uint64_t imm_index, svuint16_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfFourVectors(tuple1,, imm_index, x); + + /// + /// svuint32x4_t svset4[_u32](svuint32x4_t tuple, uint64_t imm_index, svuint32_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfFourVectors(tuple1,, imm_index, x); + + /// + /// svuint64x4_t svset4[_u64](svuint64x4_t tuple, uint64_t imm_index, svuint64_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector 
tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfFourVectors(tuple1,, imm_index, x); + + /// + /// svbfloat16x4_t svset4[_bf16](svbfloat16x4_t tuple, uint64_t imm_index, svbfloat16_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfFourVectors(tuple1,, imm_index, x); + + /// + /// svfloat16x4_t svset4[_f16](svfloat16x4_t tuple, uint64_t imm_index, svfloat16_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfFourVectors(tuple1,, imm_index, x); + + /// + /// svfloat32x4_t svset4[_f32](svfloat32x4_t tuple, uint64_t imm_index, svfloat32_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfFourVectors(tuple1,, imm_index, x); + + /// + /// svfloat64x4_t svset4[_f64](svfloat64x4_t tuple, uint64_t imm_index, svfloat64_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) ChangeOneVectorInATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfFourVectors(tuple1,, imm_index, x); + + + /// ChangeOneVectorInATupleOfThreeVectors : Change one vector in a tuple of three vectors + + /// + /// svint8x3_t svset3[_s8](svint8x3_t tuple, uint64_t imm_index, svint8_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfThreeVectors(tuple1,, imm_index, x); + + /// + /// svint16x3_t svset3[_s16](svint16x3_t tuple, uint64_t imm_index, svint16_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfThreeVectors(tuple1,, imm_index, x); + + /// + /// svint32x3_t svset3[_s32](svint32x3_t tuple, uint64_t imm_index, svint32_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfThreeVectors(tuple1,, imm_index, x); + + /// + /// svint64x3_t svset3[_s64](svint64x3_t tuple, uint64_t imm_index, svint64_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfThreeVectors(tuple1,, imm_index, x); + + /// + /// svuint8x3_t svset3[_u8](svuint8x3_t tuple, uint64_t imm_index, svuint8_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfThreeVectors(tuple1,, imm_index, x); + + /// + /// svuint16x3_t 
svset3[_u16](svuint16x3_t tuple, uint64_t imm_index, svuint16_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfThreeVectors(tuple1,, imm_index, x); + + /// + /// svuint32x3_t svset3[_u32](svuint32x3_t tuple, uint64_t imm_index, svuint32_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfThreeVectors(tuple1,, imm_index, x); + + /// + /// svuint64x3_t svset3[_u64](svuint64x3_t tuple, uint64_t imm_index, svuint64_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfThreeVectors(tuple1,, imm_index, x); + + /// + /// svbfloat16x3_t svset3[_bf16](svbfloat16x3_t tuple, uint64_t imm_index, svbfloat16_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfThreeVectors(tuple1,, imm_index, x); + + /// + /// svfloat16x3_t svset3[_f16](svfloat16x3_t tuple, uint64_t imm_index, svfloat16_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfThreeVectors(tuple1,, imm_index, x); + + /// + /// svfloat32x3_t svset3[_f32](svfloat32x3_t tuple, uint64_t imm_index, svfloat32_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfThreeVectors(tuple1,, imm_index, x); + + /// + /// svfloat64x3_t svset3[_f64](svfloat64x3_t tuple, uint64_t imm_index, svfloat64_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) ChangeOneVectorInATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfThreeVectors(tuple1,, imm_index, x); + + + /// ChangeOneVectorInATupleOfTwoVectors : Change one vector in a tuple of two vectors + + /// + /// svint8x2_t svset2[_s8](svint8x2_t tuple, uint64_t imm_index, svint8_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfTwoVectors(tuple1,, imm_index, x); + + /// + /// svint16x2_t svset2[_s16](svint16x2_t tuple, uint64_t imm_index, svint16_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfTwoVectors(tuple1,, imm_index, x); + + /// + /// svint32x2_t svset2[_s32](svint32x2_t tuple, uint64_t imm_index, svint32_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfTwoVectors(tuple1,, imm_index, x); + + /// + /// 
svint64x2_t svset2[_s64](svint64x2_t tuple, uint64_t imm_index, svint64_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfTwoVectors(tuple1,, imm_index, x); + + /// + /// svuint8x2_t svset2[_u8](svuint8x2_t tuple, uint64_t imm_index, svuint8_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfTwoVectors(tuple1,, imm_index, x); + + /// + /// svuint16x2_t svset2[_u16](svuint16x2_t tuple, uint64_t imm_index, svuint16_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfTwoVectors(tuple1,, imm_index, x); + + /// + /// svuint32x2_t svset2[_u32](svuint32x2_t tuple, uint64_t imm_index, svuint32_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfTwoVectors(tuple1,, imm_index, x); + + /// + /// svuint64x2_t svset2[_u64](svuint64x2_t tuple, uint64_t imm_index, svuint64_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfTwoVectors(tuple1,, imm_index, x); + + /// + /// svbfloat16x2_t svset2[_bf16](svbfloat16x2_t tuple, uint64_t imm_index, svbfloat16_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfTwoVectors(tuple1,, imm_index, x); + + /// + /// svfloat16x2_t svset2[_f16](svfloat16x2_t tuple, uint64_t imm_index, svfloat16_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfTwoVectors(tuple1,, imm_index, x); + + /// + /// svfloat32x2_t svset2[_f32](svfloat32x2_t tuple, uint64_t imm_index, svfloat32_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfTwoVectors(tuple1,, imm_index, x); + + /// + /// svfloat64x2_t svset2[_f64](svfloat64x2_t tuple, uint64_t imm_index, svfloat64_t x) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) ChangeOneVectorInATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index, Vector x) => ChangeOneVectorInATupleOfTwoVectors(tuple1,, imm_index, x); + + + /// CreateATupleOfFourVectors : Create a tuple of four vectors + + /// + /// svint8x4_t svcreate4[_s8](svint8_t x0, svint8_t x1, svint8_t x2, svint8_t x3) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svint16x4_t svcreate4[_s16](svint16_t x0, svint16_t x1, svint16_t x2, svint16_t x3) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, 
Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svint32x4_t svcreate4[_s32](svint32_t x0, svint32_t x1, svint32_t x2, svint32_t x3) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svint64x4_t svcreate4[_s64](svint64_t x0, svint64_t x1, svint64_t x2, svint64_t x3) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svuint8x4_t svcreate4[_u8](svuint8_t x0, svuint8_t x1, svuint8_t x2, svuint8_t x3) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svuint16x4_t svcreate4[_u16](svuint16_t x0, svuint16_t x1, svuint16_t x2, svuint16_t x3) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svuint32x4_t svcreate4[_u32](svuint32_t x0, svuint32_t x1, svuint32_t x2, svuint32_t x3) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svuint64x4_t svcreate4[_u64](svuint64_t x0, svuint64_t x1, svuint64_t x2, svuint64_t x3) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svbfloat16x4_t svcreate4[_bf16](svbfloat16_t x0, svbfloat16_t x1, svbfloat16_t x2, svbfloat16_t x3) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svfloat16x4_t svcreate4[_f16](svfloat16_t x0, svfloat16_t x1, svfloat16_t x2, svfloat16_t x3) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svfloat32x4_t svcreate4[_f32](svfloat32_t x0, svfloat32_t x1, svfloat32_t x2, svfloat32_t x3) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + /// + /// svfloat64x4_t svcreate4[_f64](svfloat64_t x0, svfloat64_t x1, svfloat64_t x2, svfloat64_t x3) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateATupleOfFourVectors(Vector x0, Vector x1, Vector x2, Vector x3) => CreateATupleOfFourVectors(x0, x1, x2, x3); + + + /// CreateATupleOfThreeVectors : Create a tuple of three vectors + + /// + /// svint8x3_t svcreate3[_s8](svint8_t x0, svint8_t x1, svint8_t x2) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svint16x3_t 
svcreate3[_s16](svint16_t x0, svint16_t x1, svint16_t x2) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svint32x3_t svcreate3[_s32](svint32_t x0, svint32_t x1, svint32_t x2) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svint64x3_t svcreate3[_s64](svint64_t x0, svint64_t x1, svint64_t x2) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svuint8x3_t svcreate3[_u8](svuint8_t x0, svuint8_t x1, svuint8_t x2) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svuint16x3_t svcreate3[_u16](svuint16_t x0, svuint16_t x1, svuint16_t x2) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svuint32x3_t svcreate3[_u32](svuint32_t x0, svuint32_t x1, svuint32_t x2) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svuint64x3_t svcreate3[_u64](svuint64_t x0, svuint64_t x1, svuint64_t x2) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svbfloat16x3_t svcreate3[_bf16](svbfloat16_t x0, svbfloat16_t x1, svbfloat16_t x2) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svfloat16x3_t svcreate3[_f16](svfloat16_t x0, svfloat16_t x1, svfloat16_t x2) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svfloat32x3_t svcreate3[_f32](svfloat32_t x0, svfloat32_t x1, svfloat32_t x2) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + /// + /// svfloat64x3_t svcreate3[_f64](svfloat64_t x0, svfloat64_t x1, svfloat64_t x2) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateATupleOfThreeVectors(Vector x0, Vector x1, Vector x2) => CreateATupleOfThreeVectors(x0, x1, x2); + + + /// CreateATupleOfTwoVectors : Create a tuple of two vectors + + /// + /// svint8x2_t svcreate2[_s8](svint8_t x0, svint8_t x1) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svint16x2_t svcreate2[_s16](svint16_t x0, svint16_t x1) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svint32x2_t 
svcreate2[_s32](svint32_t x0, svint32_t x1) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svint64x2_t svcreate2[_s64](svint64_t x0, svint64_t x1) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svuint8x2_t svcreate2[_u8](svuint8_t x0, svuint8_t x1) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svuint16x2_t svcreate2[_u16](svuint16_t x0, svuint16_t x1) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svuint32x2_t svcreate2[_u32](svuint32_t x0, svuint32_t x1) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svuint64x2_t svcreate2[_u64](svuint64_t x0, svuint64_t x1) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svbfloat16x2_t svcreate2[_bf16](svbfloat16_t x0, svbfloat16_t x1) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svfloat16x2_t svcreate2[_f16](svfloat16_t x0, svfloat16_t x1) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svfloat32x2_t svcreate2[_f32](svfloat32_t x0, svfloat32_t x1) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + /// + /// svfloat64x2_t svcreate2[_f64](svfloat64_t x0, svfloat64_t x1) + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateATupleOfTwoVectors(Vector x0, Vector x1) => CreateATupleOfTwoVectors(x0, x1); + + + /// CreateAnUninitializedTupleOfFourVectors : Create an uninitialized tuple of four vectors + + /// + /// svint8x4_t svundef4_s8() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svint16x4_t svundef4_s16() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svint32x4_t svundef4_s32() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svint64x4_t svundef4_s64() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svuint8x4_t svundef4_u8() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svuint16x4_t svundef4_u16() + 
/// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svuint32x4_t svundef4_u32() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svuint64x4_t svundef4_u64() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svbfloat16x4_t svundef4_bf16() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svfloat16x4_t svundef4_f16() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svfloat32x4_t svundef4_f32() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + /// + /// svfloat64x4_t svundef4_f64() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector, Vector) CreateAnUninitializedTupleOfFourVectors() => CreateAnUninitializedTupleOfFourVectors(); + + + /// CreateAnUninitializedTupleOfThreeVectors : Create an uninitialized tuple of three vectors + + /// + /// svint8x3_t svundef3_s8() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svint16x3_t svundef3_s16() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svint32x3_t svundef3_s32() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svint64x3_t svundef3_s64() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svuint8x3_t svundef3_u8() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svuint16x3_t svundef3_u16() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svuint32x3_t svundef3_u32() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svuint64x3_t svundef3_u64() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svbfloat16x3_t svundef3_bf16() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); 
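The svundef wrappers in this group return vectors whose contents are unspecified: nothing may be read from them until every element has been overwritten. A hedged usage sketch follows, assuming the Vector<float> instantiation of the three-vector overload (the generated stubs above lose their generic arguments in this dump):

    using System.Numerics;

    static void InitFromUndef()
    {
        // svundef3-style helper: tuple contents are unspecified until written.
        var (v0, v1, v2) = SveNone.CreateAnUninitializedTupleOfThreeVectors();
        v0 = Vector<float>.Zero;   // overwrite every vector before any read
        v1 = Vector<float>.One;
        v2 = Vector<float>.Zero;
    }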
+ + /// + /// svfloat16x3_t svundef3_f16() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svfloat32x3_t svundef3_f32() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + /// + /// svfloat64x3_t svundef3_f64() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector, Vector) CreateAnUninitializedTupleOfThreeVectors() => CreateAnUninitializedTupleOfThreeVectors(); + + + /// CreateAnUninitializedTupleOfTwoVectors : Create an uninitialized tuple of two vectors + + /// + /// svint8x2_t svundef2_s8() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svint16x2_t svundef2_s16() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svint32x2_t svundef2_s32() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svint64x2_t svundef2_s64() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svuint8x2_t svundef2_u8() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svuint16x2_t svundef2_u16() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svuint32x2_t svundef2_u32() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svuint64x2_t svundef2_u64() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svbfloat16x2_t svundef2_bf16() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svfloat16x2_t svundef2_f16() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svfloat32x2_t svundef2_f32() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + /// + /// svfloat64x2_t svundef2_f64() + /// + /// codegenarm64test: + /// + public static unsafe (Vector, Vector) CreateAnUninitializedTupleOfTwoVectors() => CreateAnUninitializedTupleOfTwoVectors(); + + + /// CreateAnUninitializedVector : Create an uninitialized vector + + /// + /// svint8_t svundef_s8() + /// + /// codegenarm64test: + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svint16_t svundef_s16() + /// + /// codegenarm64test: + /// + public static unsafe Vector 
CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svint32_t svundef_s32() + /// + /// codegenarm64test: + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svint64_t svundef_s64() + /// + /// codegenarm64test: + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svuint8_t svundef_u8() + /// + /// codegenarm64test: + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svuint16_t svundef_u16() + /// + /// codegenarm64test: + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svuint32_t svundef_u32() + /// + /// codegenarm64test: + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svuint64_t svundef_u64() + /// + /// codegenarm64test: + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svbfloat16_t svundef_bf16() + /// + /// codegenarm64test: + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svfloat16_t svundef_f16() + /// + /// codegenarm64test: + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svfloat32_t svundef_f32() + /// + /// codegenarm64test: + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + /// + /// svfloat64_t svundef_f64() + /// + /// codegenarm64test: + /// + public static unsafe Vector CreateAnUninitializedVector() => CreateAnUninitializedVector(); + + + /// DuplicateSelectedScalarToVector : Broadcast a quadword of scalars + + /// + /// svint8_t svdupq[_n]_s8(int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7, int8_t x8, int8_t x9, int8_t x10, int8_t x11, int8_t x12, int8_t x13, int8_t x14, int8_t x15) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(sbyte x0, [ConstantExpected] byte index, sbyte x2, sbyte x3, sbyte x4, sbyte x5, sbyte x6, sbyte x7, sbyte x8, sbyte x9, sbyte x10, sbyte x11, sbyte x12, sbyte x13, sbyte x14, sbyte x15) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); + + /// + /// svint16_t svdupq[_n]_s16(int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(short x0, [ConstantExpected] byte index, short x2, short x3, short x4, short x5, short x6, short x7) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7); + + /// + /// svint32_t svdupq[_n]_s32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(int x0, [ConstantExpected] byte index, int x2, int x3) => DuplicateSelectedScalarToVector(x0, index, x2, x3); + + /// + /// svint64_t svdupq[_n]_s64(int64_t x0, int64_t x1) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(long x0, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(x0, index); + + /// + /// svuint8_t svdupq[_n]_u8(uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7, uint8_t x8, uint8_t x9, 
uint8_t x10, uint8_t x11, uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(byte x0, [ConstantExpected] byte index, byte x2, byte x3, byte x4, byte x5, byte x6, byte x7, byte x8, byte x9, byte x10, byte x11, byte x12, byte x13, byte x14, byte x15) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); + + /// + /// svbool_t svdupq[_n]_b8(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7, bool x8, bool x9, bool x10, bool x11, bool x12, bool x13, bool x14, bool x15) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, [ConstantExpected] byte index, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7, bool x8, bool x9, bool x10, bool x11, bool x12, bool x13, bool x14, bool x15) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); + + /// + /// svuint16_t svdupq[_n]_u16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(ushort x0, [ConstantExpected] byte index, ushort x2, ushort x3, ushort x4, ushort x5, ushort x6, ushort x7) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7); + + /// + /// svbool_t svdupq[_n]_b16(bool x0, bool x1, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, [ConstantExpected] byte index, bool x2, bool x3, bool x4, bool x5, bool x6, bool x7) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7); + + /// + /// svuint32_t svdupq[_n]_u32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(uint x0, [ConstantExpected] byte index, uint x2, uint x3) => DuplicateSelectedScalarToVector(x0, index, x2, x3); + + /// + /// svbool_t svdupq[_n]_b32(bool x0, bool x1, bool x2, bool x3) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, [ConstantExpected] byte index, bool x2, bool x3) => DuplicateSelectedScalarToVector(x0, index, x2, x3); + + /// + /// svuint64_t svdupq[_n]_u64(uint64_t x0, uint64_t x1) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(ulong x0, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(x0, index); + + /// + /// svbool_t svdupq[_n]_b64(bool x0, bool x1) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bool x0, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(x0, index); + + /// + /// svbfloat16_t svdupq[_n]_bf16(bfloat16_t x0, bfloat16_t x1, bfloat16_t x2, bfloat16_t x3, bfloat16_t x4, bfloat16_t x5, bfloat16_t x6, bfloat16_t x7) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(bfloat16 x0, [ConstantExpected] byte index, bfloat16 x2, bfloat16 x3, bfloat16 x4, bfloat16 x5, bfloat16 x6, bfloat16 x7) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7); + + /// + /// svfloat16_t svdupq[_n]_f16(float16_t x0, float16_t x1, float16_t x2, float16_t x3, float16_t x4, float16_t x5, float16_t x6, float16_t x7) + /// + /// codegenarm64test: + /// + public 
static unsafe Vector DuplicateSelectedScalarToVector(half x0, [ConstantExpected] byte index, half x2, half x3, half x4, half x5, half x6, half x7) => DuplicateSelectedScalarToVector(x0, index, x2, x3, x4, x5, x6, x7); + + /// + /// svfloat32_t svdupq[_n]_f32(float32_t x0, float32_t x1, float32_t x2, float32_t x3) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(float x0, [ConstantExpected] byte index, float x2, float x3) => DuplicateSelectedScalarToVector(x0, index, x2, x3); + + /// + /// svfloat64_t svdupq[_n]_f64(float64_t x0, float64_t x1) + /// + /// codegenarm64test: + /// + public static unsafe Vector DuplicateSelectedScalarToVector(double x0, [ConstantExpected] byte index) => DuplicateSelectedScalarToVector(x0, index); + + + /// ExtractOneVectorFromATupleOfFourVectors : Extract one vector from a tuple of four vectors + + /// + /// svint8_t svget4[_s8](svint8x4_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svint16_t svget4[_s16](svint16x4_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svint32_t svget4[_s32](svint32x4_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svint64_t svget4[_s64](svint64x4_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svuint8_t svget4[_u8](svuint8x4_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svuint16_t svget4[_u16](svuint16x4_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svuint32_t svget4[_u32](svuint32x4_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svuint64_t svget4[_u64](svuint64x4_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svbfloat16_t svget4[_bf16](svbfloat16x4_t tuple, 
uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svfloat16_t svget4[_f16](svfloat16x4_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svfloat32_t svget4[_f32](svfloat32x4_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + /// + /// svfloat64_t svget4[_f64](svfloat64x4_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfFourVectors((Vector tuple1, Vector tuple2, Vector tuple3, Vector tuple4), ulong imm_index) => ExtractOneVectorFromATupleOfFourVectors(tuple1,, imm_index); + + + /// ExtractOneVectorFromATupleOfThreeVectors : Extract one vector from a tuple of three vectors + + /// + /// svint8_t svget3[_s8](svint8x3_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svint16_t svget3[_s16](svint16x3_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svint32_t svget3[_s32](svint32x3_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svint64_t svget3[_s64](svint64x3_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svuint8_t svget3[_u8](svuint8x3_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svuint16_t svget3[_u16](svuint16x3_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svuint32_t svget3[_u32](svuint32x3_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + 
/// svuint64_t svget3[_u64](svuint64x3_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svbfloat16_t svget3[_bf16](svbfloat16x3_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svfloat16_t svget3[_f16](svfloat16x3_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svfloat32_t svget3[_f32](svfloat32x3_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + /// + /// svfloat64_t svget3[_f64](svfloat64x3_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfThreeVectors((Vector tuple1, Vector tuple2, Vector tuple3), ulong imm_index) => ExtractOneVectorFromATupleOfThreeVectors(tuple1,, imm_index); + + + /// ExtractOneVectorFromATupleOfTwoVectors : Extract one vector from a tuple of two vectors + + /// + /// svint8_t svget2[_s8](svint8x2_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svint16_t svget2[_s16](svint16x2_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svint32_t svget2[_s32](svint32x2_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svint64_t svget2[_s64](svint64x2_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svuint8_t svget2[_u8](svuint8x2_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svuint16_t svget2[_u16](svuint16x2_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svuint32_t svget2[_u32](svuint32x2_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static 
unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svuint64_t svget2[_u64](svuint64x2_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svbfloat16_t svget2[_bf16](svbfloat16x2_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svfloat16_t svget2[_f16](svfloat16x2_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svfloat32_t svget2[_f32](svfloat32x2_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + /// + /// svfloat64_t svget2[_f64](svfloat64x2_t tuple, uint64_t imm_index) + /// + /// codegenarm64test: + /// + public static unsafe Vector ExtractOneVectorFromATupleOfTwoVectors((Vector tuple1, Vector tuple2), ulong imm_index) => ExtractOneVectorFromATupleOfTwoVectors(tuple1,, imm_index); + + + /// ReinterpretVectorContents : Reinterpret vector contents + + /// + /// svint8_t svreinterpret_s8[_bf16](svbfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_f16](svfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_f32](svfloat32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_f64](svfloat64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_s8](svint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_s16](svint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_s32](svint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_s64](svint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_u8](svuint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + 
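ReinterpretVectorContents wraps svreinterpret, which is in effect a bit-level cast: lane bit patterns are reused rather than converted, and typically no instruction is needed. A hedged sketch, assuming the overloads regain their Vector<T> type arguments (the sbyte-from-float view corresponds to the svreinterpret_s8[_f32] overload above):

    using System.Numerics;

    static void BitCast()
    {
        Vector<float> f = Vector<float>.One;               // each lane holds the bits 0x3F800000
        // View the same register as signed bytes; no data movement or conversion occurs.
        Vector<sbyte> raw = SveNone.ReinterpretVectorContents(f);
    }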
/// + /// svint8_t svreinterpret_s8[_u16](svuint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_u32](svuint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint8_t svreinterpret_s8[_u64](svuint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_s8](svint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_s32](svint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_s64](svint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_u8](svuint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_u16](svuint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_u32](svuint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_bf16](svbfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_f16](svfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_f32](svfloat32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_f64](svfloat64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_s16](svint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint16_t svreinterpret_s16[_u64](svuint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_s8](svint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_s16](svint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_s64](svint64_t op) + /// 
+ /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_u8](svuint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_u16](svuint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_u32](svuint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_u64](svuint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_bf16](svbfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_f16](svfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_f32](svfloat32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_f64](svfloat64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint32_t svreinterpret_s32[_s32](svint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_s8](svint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_s16](svint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_s32](svint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_u8](svuint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_u16](svuint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_u32](svuint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_u64](svuint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_bf16](svbfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe 
Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_f16](svfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_f32](svfloat32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_f64](svfloat64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svint64_t svreinterpret_s64[_s64](svint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_s8](svint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_s16](svint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_s32](svint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_s64](svint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_u16](svuint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_u32](svuint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_u64](svuint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_bf16](svbfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_f16](svfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_f32](svfloat32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_f64](svfloat64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint8_t svreinterpret_u8[_u8](svuint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_s8](svint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => 
ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_s16](svint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_s32](svint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_s64](svint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_u8](svuint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_u32](svuint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_u64](svuint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_bf16](svbfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_f16](svfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_f32](svfloat32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_f64](svfloat64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint16_t svreinterpret_u16[_u16](svuint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_s8](svint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_s16](svint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_s32](svint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_s64](svint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_u8](svuint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_u16](svuint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// 
+ /// svuint32_t svreinterpret_u32[_u64](svuint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_bf16](svbfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_f16](svfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_f32](svfloat32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_f64](svfloat64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint32_t svreinterpret_u32[_u32](svuint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_s8](svint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_s16](svint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_s32](svint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_s64](svint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_u8](svuint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_u16](svuint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_u32](svuint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_bf16](svbfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_f16](svfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_f32](svfloat32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t svreinterpret_u64[_f64](svfloat64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svuint64_t 
svreinterpret_u64[_u64](svuint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_bf16](svbfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_s8](svint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_s16](svint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_s32](svint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_s64](svint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_u8](svuint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_u16](svuint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_u32](svuint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_u64](svuint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_f16](svfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_f32](svfloat32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svbfloat16_t svreinterpret_bf16[_f64](svfloat64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_s8](svint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_s16](svint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_s32](svint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_u8](svuint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t 
svreinterpret_f16[_u16](svuint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_u32](svuint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_bf16](svbfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_f16](svfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_s64](svint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_u64](svuint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_f32](svfloat32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat16_t svreinterpret_f16[_f64](svfloat64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_s8](svint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_s16](svint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_s32](svint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_s64](svint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_u8](svuint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_u16](svuint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_u32](svuint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_u64](svuint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_bf16](svbfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t 
svreinterpret_f32[_f16](svfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_f32](svfloat32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat32_t svreinterpret_f32[_f64](svfloat64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_bf16](svbfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_f16](svfloat16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_f32](svfloat32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_f64](svfloat64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_s8](svint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_s16](svint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_s32](svint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_s64](svint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_u8](svuint8_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_u16](svuint16_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_u32](svuint32_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + /// + /// svfloat64_t svreinterpret_f64[_u64](svuint64_t op) + /// + /// codegenarm64test: + /// + public static unsafe Vector ReinterpretVectorContents(Vector value) => ReinterpretVectorContents(value); + + } +} + diff --git a/sve_api/out_helper_api/SveSha3.cs b/sve_api/out_helper_api/SveSha3.cs new file mode 100644 index 0000000000000..9158684f1ad88 --- /dev/null +++ b/sve_api/out_helper_api/SveSha3.cs @@ -0,0 +1,54 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license. 
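The svget2 stubs above select one vector out of a two-vector tuple by an immediate index that must be 0 or 1. A plain C# model of that semantics, using ulong as one representative element type (an illustrative sketch, not the intrinsic itself):

using System.Numerics;

static class SveGet2Model
{
    // Models ExtractOneVectorFromATupleOfTwoVectors (svget2): imm_index picks
    // which of the two tuple vectors is returned; the hardware form takes it
    // as an immediate, so only 0 and 1 are meaningful.
    public static Vector<ulong> Extract((Vector<ulong> tuple1, Vector<ulong> tuple2) tuple, ulong imm_index)
        => imm_index == 0 ? tuple.tuple1 : tuple.tuple2;
}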
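The ReinterpretVectorContents overloads above are pure bit-pattern casts: the register contents are untouched and only the element type through which the lanes are viewed changes, which is why every source/destination pairing is generated. The fixed-width vector API that ships today has the same semantics; a small self-contained sketch:

using System;
using System.Runtime.Intrinsics;

class ReinterpretModel
{
    static void Main()
    {
        // Reinterpreting float lanes as int lanes moves no data and converts
        // no values; the same 128 bits are simply typed differently.
        Vector128<float> f = Vector128.Create(1.0f, -2.0f, 0.5f, 8.0f);
        Vector128<int> asInts = f.AsInt32();
        Console.WriteLine($"{asInts.GetElement(0):X8}"); // 3F800000, the raw bits of 1.0f
    }
}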
+ +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveSha3 : AdvSimd + { + internal SveSha3() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// BitwiseRotateLeftBy1AndXor : Bitwise rotate left by 1 and exclusive OR + + /// + /// svint64_t svrax1[_s64](svint64_t op1, svint64_t op2) + /// RAX1 Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GJ_3A RAX1 <Zd>.D, <Zn>.D, <Zm>.D + /// theEmitter->emitIns_R_R_R(INS_sve_rax1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector<long> BitwiseRotateLeftBy1AndXor(Vector<long> left, Vector<long> right) => BitwiseRotateLeftBy1AndXor(left, right); + + /// + /// svuint64_t svrax1[_u64](svuint64_t op1, svuint64_t op2) + /// RAX1 Zresult.D, Zop1.D, Zop2.D + /// + /// codegenarm64test: + /// IF_SVE_GJ_3A RAX1 <Zd>.D, <Zn>.D, <Zm>.D + /// theEmitter->emitIns_R_R_R(INS_sve_rax1, EA_SCALABLE, REG_V0, REG_V1, REG_V2, INS_OPTS_SCALABLE_D); + /// + public static unsafe Vector<ulong> BitwiseRotateLeftBy1AndXor(Vector<ulong> left, Vector<ulong> right) => BitwiseRotateLeftBy1AndXor(left, right); + + } +} + diff --git a/sve_api/out_helper_api/SveSm4.cs b/sve_api/out_helper_api/SveSm4.cs new file mode 100644 index 0000000000000..3acf422e77c08 --- /dev/null +++ b/sve_api/out_helper_api/SveSm4.cs @@ -0,0 +1,57 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license.
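Per 64-bit lane, RAX1 XORs the first operand with the second operand rotated left by one bit, a step that recurs in Keccak (SHA-3) and is what the SveSha3 class above wraps. A one-lane scalar model:

using System.Numerics;

static class Rax1Model
{
    // One 64-bit lane of RAX1: left ^ ROL(right, 1).
    public static ulong Rax1(ulong left, ulong right)
        => left ^ BitOperations.RotateLeft(right, 1);
}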
+ +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.Intrinsics; +using System.Numerics; + +namespace System.Runtime.Intrinsics.Arm +{ + /// + /// This class provides access to the ARM SVE hardware instructions via intrinsics + /// + [Intrinsic] + [CLSCompliant(false)] + public abstract class SveSm4 : AdvSimd + { + internal SveSm4() { } + + public static new bool IsSupported { get => IsSupported; } + + [Intrinsic] + public new abstract class Arm64 : AdvSimd.Arm64 + { + internal Arm64() { } + + public static new bool IsSupported { get => IsSupported; } + } + + /// Sm4EncryptionAndDecryption : SM4 encryption and decryption + + /// + /// svuint32_t svsm4e[_u32](svuint32_t op1, svuint32_t op2) + /// SM4E Ztied1.S, Ztied1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GK_2A SM4E <Zdn>.S, <Zdn>.S, <Zm>.S + /// theEmitter->emitIns_R_R(INS_sve_sm4e, EA_SCALABLE, REG_V3, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector<uint> Sm4EncryptionAndDecryption(Vector<uint> left, Vector<uint> right) => Sm4EncryptionAndDecryption(left, right); + + + /// Sm4KeyUpdates : SM4 key updates + + /// + /// svuint32_t svsm4ekey[_u32](svuint32_t op1, svuint32_t op2) + /// SM4EKEY Zresult.S, Zop1.S, Zop2.S + /// + /// codegenarm64test: + /// IF_SVE_GJ_3A SM4EKEY <Zd>.S, <Zn>.S, <Zm>.S + /// theEmitter->emitIns_R_R_R(INS_sve_sm4ekey, EA_SCALABLE, REG_V3, REG_V4, REG_V5, INS_OPTS_SCALABLE_S); + /// + public static unsafe Vector<uint> Sm4KeyUpdates(Vector<uint> left, Vector<uint> right) => Sm4KeyUpdates(left, right); + + } +} + diff --git a/sve_api/out_hwintrinsiclistarm64sve.h b/sve_api/out_hwintrinsiclistarm64sve.h new file mode 100644 index 0000000000000..fd1d0027ff2da --- /dev/null +++ b/sve_api/out_hwintrinsiclistarm64sve.h @@ -0,0 +1,753 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MIT license.
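SM4E applies four rounds of the SM4 block cipher to each 128-bit granule, and SM4EKEY derives the next four round keys from the previous four; both operate on groups of four 32-bit lanes. A hypothetical usage sketch against the SveSm4 surface defined above (a proposed API in this diff, not available in shipped runtimes):

using System;
using System.Numerics;
using System.Runtime.Intrinsics.Arm;

static class Sm4Sketch
{
    // Hypothetical helper: guards on IsSupported before touching the intrinsic.
    public static Vector<uint> NextRoundKeys(Vector<uint> previousKeys, Vector<uint> constants)
    {
        if (!SveSm4.IsSupported)
            throw new PlatformNotSupportedException();
        // Each 128-bit granule of the result holds the next four SM4 round keys.
        return SveSm4.Sm4KeyUpdates(previousKeys, constants);
    }
}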
+ +/*****************************************************************************/ +#ifndef HARDWARE_INTRINSIC +#error Define HARDWARE_INTRINSIC before including this file +#endif +/*****************************************************************************/ + +// clang-format off + +#ifdef FEATURE_HW_INTRINSICS + +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// ISA Function name SIMD size NumArg EncodesExtraTypeArg Instructions Category Flags +// {TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT, TYP_UINT, TYP_LONG, TYP_ULONG, TYP_FLOAT, TYP_DOUBLE} +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// Sve +HARDWARE_INTRINSIC(Sve, Abs, -1, 1, true, {INS_sve_abs, INS_invalid, INS_sve_abs, INS_invalid, INS_sve_abs, INS_invalid, INS_sve_abs, INS_invalid, INS_sve_fabs, INS_sve_fabs}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, AbsoluteCompareGreaterThan, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_facgt, INS_sve_facgt}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, AbsoluteCompareGreaterThanOrEqual, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_facge, INS_sve_facge}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, AbsoluteCompareLessThan, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_facgt, INS_sve_facgt}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, AbsoluteCompareLessThanOrEqual, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_facge, INS_sve_facge}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, AbsoluteDifference, -1, 2, true, {INS_sve_sabd, INS_sve_uabd, INS_sve_sabd, INS_sve_uabd, INS_sve_sabd, INS_sve_uabd, INS_sve_sabd, INS_sve_uabd, INS_sve_fabd, INS_sve_fabd}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, Add, -1, 2, true, {INS_sve_add, INS_sve_add, INS_sve_add, INS_sve_add, INS_sve_add, INS_sve_add, INS_sve_add, INS_sve_add, INS_sve_fadd, INS_sve_fadd}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, AddAcross, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_saddv/INS_sve_uaddv,INS_sve_uaddv, INS_sve_faddv, INS_sve_faddv}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, AddRotateComplex, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcadd, INS_sve_fcadd}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, AddSaturate, -1, 2, true, {INS_sve_sqadd, 
INS_sve_uqadd, INS_sve_sqadd, INS_sve_uqadd, INS_sve_sqadd, INS_sve_uqadd, INS_sve_sqadd, INS_sve_uqadd, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, AddSequentialAcross, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fadda, INS_sve_fadda}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, And, -1, 2, true, {INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_sve_and, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, AndAcross, -1, 1, true, {INS_sve_andv, INS_sve_andv, INS_sve_andv, INS_sve_andv, INS_sve_andv, INS_sve_andv, INS_sve_andv, INS_sve_andv, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, AndNot, -1, 2, true, {INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_sve_nand, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, BitwiseClear, -1, 2, true, {INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_sve_bic, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, BooleanNot, -1, 1, true, {INS_sve_cnot, INS_sve_cnot, INS_sve_cnot, INS_sve_cnot, INS_sve_cnot, INS_sve_cnot, INS_sve_cnot, INS_sve_cnot, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, Compact, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_compact, INS_sve_compact, INS_sve_compact, INS_sve_compact, INS_sve_compact, INS_sve_compact}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CompareEqual, -1, 2, true, {INS_sve_cmpeq, INS_sve_cmpeq, INS_sve_cmpeq, INS_sve_cmpeq, INS_sve_cmpeq, INS_sve_cmpeq, INS_sve_cmpeq, INS_sve_cmpeq, INS_sve_fcmeq, INS_sve_fcmeq}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, CompareGreaterThan, -1, 2, true, {INS_sve_cmpgt, INS_sve_cmphi, INS_sve_cmpgt, INS_sve_cmphi, INS_sve_cmpgt, INS_sve_cmphi, INS_sve_cmpgt, INS_sve_cmphi, INS_sve_fcmgt, INS_sve_fcmgt}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, CompareGreaterThanOrEqual, -1, 2, true, {INS_sve_cmpge, INS_sve_cmphs, INS_sve_cmpge, INS_sve_cmphs, INS_sve_cmpge, INS_sve_cmphs, INS_sve_cmpge, INS_sve_cmphs, INS_sve_fcmge, INS_sve_fcmge}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, CompareLessThan, -1, 2, true, {INS_sve_cmpgt/INS_sve_cmplt,INS_sve_cmphi/INS_sve_cmplo,INS_sve_cmpgt/INS_sve_cmplt,INS_sve_cmphi/INS_sve_cmplo,INS_sve_cmpgt/INS_sve_cmplt,INS_sve_cmphi/INS_sve_cmplo,INS_sve_cmpgt, INS_sve_cmphi, INS_sve_fcmgt, INS_sve_fcmgt}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, CompareLessThanOrEqual, -1, 2, true, {INS_sve_cmpge/INS_sve_cmple,INS_sve_cmphs/INS_sve_cmpls,INS_sve_cmpge/INS_sve_cmple,INS_sve_cmphs/INS_sve_cmpls,INS_sve_cmpge/INS_sve_cmple,INS_sve_cmphs/INS_sve_cmpls,INS_sve_cmpge, INS_sve_cmphs, INS_sve_fcmge, INS_sve_fcmge}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, CompareNotEqualTo, -1, 2, true, {INS_sve_cmpne, INS_sve_cmpne, INS_sve_cmpne, INS_sve_cmpne, INS_sve_cmpne, INS_sve_cmpne, INS_sve_cmpne, INS_sve_cmpne, INS_sve_fcmne, INS_sve_fcmne}, HW_Category_SIMD, 
HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, CompareUnordered, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcmuo, INS_sve_fcmuo}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, Compute16BitAddresses, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_adr, INS_invalid, INS_sve_adr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, Compute32BitAddresses, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_adr, INS_invalid, INS_sve_adr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, Compute64BitAddresses, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_adr, INS_invalid, INS_sve_adr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, Compute8BitAddresses, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_adr, INS_invalid, INS_sve_adr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, ConditionalExtractAfterLastActiveElement, -1, 3, true, {INS_sve_clasta, INS_sve_clasta, INS_sve_clasta, INS_sve_clasta, INS_sve_clasta, INS_sve_clasta, INS_sve_clasta, INS_sve_clasta, INS_sve_clasta, INS_sve_clasta}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, ConditionalExtractAfterLastActiveElementAndReplicate, -1, 3, true, {INS_sve_clasta, INS_sve_clasta, INS_sve_clasta, INS_sve_clasta, INS_sve_clasta, INS_sve_clasta, INS_sve_clasta, INS_sve_clasta, INS_sve_clasta, INS_sve_clasta}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ConditionalExtractLastActiveElement, -1, 3, true, {INS_sve_clastb, INS_sve_clastb, INS_sve_clastb, INS_sve_clastb, INS_sve_clastb, INS_sve_clastb, INS_sve_clastb, INS_sve_clastb, INS_sve_clastb, INS_sve_clastb}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, ConditionalExtractLastActiveElementAndReplicate, -1, 3, true, {INS_sve_clastb, INS_sve_clastb, INS_sve_clastb, INS_sve_clastb, INS_sve_clastb, INS_sve_clastb, INS_sve_clastb, INS_sve_clastb, INS_sve_clastb, INS_sve_clastb}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ConditionalSelect, -1, 3, true, {INS_sve_sel, INS_sve_sel, INS_sve_sel, INS_sve_sel, INS_sve_sel, INS_sve_sel, INS_sve_sel, INS_sve_sel, INS_sve_sel, INS_sve_sel}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ConvertToDouble, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvt/INS_sve_scvtf/INS_sve_ucvtf},HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, ConvertToInt32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvtzs, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, ConvertToInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvtzs, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, ConvertToSingle, -1, 1, 
false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvt/INS_sve_scvtf/INS_sve_ucvtf,INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, ConvertToUInt32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvtzu, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, ConvertToUInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvtzu, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, Count16BitElements, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, Count32BitElements, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, Count64BitElements, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, Count8BitElements, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateBreakAfterMask, -1, 2, true, {INS_sve_brka, INS_sve_brka, INS_sve_brka, INS_sve_brka, INS_sve_brka, INS_sve_brka, INS_sve_brka, INS_sve_brka, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateBreakAfterPropagateMask, -1, 3, true, {INS_sve_brkpa, INS_sve_brkpa, INS_sve_brkpa, INS_sve_brkpa, INS_sve_brkpa, INS_sve_brkpa, INS_sve_brkpa, INS_sve_brkpa, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateBreakBeforeMask, -1, 2, true, {INS_sve_brkb, INS_sve_brkb, INS_sve_brkb, INS_sve_brkb, INS_sve_brkb, INS_sve_brkb, INS_sve_brkb, INS_sve_brkb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateBreakBeforePropagateMask, -1, 3, true, {INS_sve_brkpb, INS_sve_brkpb, INS_sve_brkpb, INS_sve_brkpb, INS_sve_brkpb, INS_sve_brkpb, INS_sve_brkpb, INS_sve_brkpb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateBreakPropagateMask, -1, 2, true, {INS_sve_brkn, INS_sve_brkn, INS_sve_brkn, INS_sve_brkn, INS_sve_brkn, INS_sve_brkn, INS_sve_brkn, INS_sve_brkn, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateFalseMaskByte, -1, 0, false, {INS_invalid, INS_sve_pfalse, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateFalseMaskDouble, -1, 0, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_pfalse}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateFalseMaskInt16, -1, 0, false, {INS_invalid, INS_invalid, INS_sve_pfalse, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, 
HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateFalseMaskInt32, -1, 0, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_pfalse, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateFalseMaskInt64, -1, 0, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_pfalse, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateFalseMaskSByte, -1, 0, false, {INS_sve_pfalse, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateFalseMaskSingle, -1, 0, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_pfalse, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateFalseMaskUInt16, -1, 0, false, {INS_invalid, INS_invalid, INS_invalid, INS_sve_pfalse, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateFalseMaskUInt32, -1, 0, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_pfalse, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateFalseMaskUInt64, -1, 0, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_pfalse, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateMaskForFirstActiveElement, -1, 2, true, {INS_sve_pfirst, INS_sve_pfirst, INS_sve_pfirst, INS_sve_pfirst, INS_sve_pfirst, INS_sve_pfirst, INS_sve_pfirst, INS_sve_pfirst, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateMaskForNextActiveElement, -1, 2, true, {INS_invalid, INS_sve_pnext, INS_invalid, INS_sve_pnext, INS_invalid, INS_sve_pnext, INS_invalid, INS_sve_pnext, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateTrueMaskByte, -1, 1, false, {INS_invalid, INS_sve_ptrue, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateTrueMaskDouble, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ptrue}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateTrueMaskInt16, -1, 1, false, {INS_invalid, INS_invalid, INS_sve_ptrue, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateTrueMaskInt32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ptrue, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateTrueMaskInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ptrue, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateTrueMaskSByte, -1, 1, false, {INS_sve_ptrue, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateTrueMaskSingle, -1, 
1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ptrue, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateTrueMaskUInt16, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_sve_ptrue, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateTrueMaskUInt32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ptrue, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateTrueMaskUInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ptrue, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, CreateWhileLessThanMask16Bit, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_sve_whilelt/INS_sve_whilelo,INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, CreateWhileLessThanMask32Bit, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_whilelt/INS_sve_whilelo,INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, CreateWhileLessThanMask64Bit, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_whilelt/INS_sve_whilelo,INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, CreateWhileLessThanMask8Bit, -1, 2, false, {INS_invalid, INS_sve_whilelt/INS_sve_whilelo,INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, CreateWhileLessThanOrEqualMask16Bit, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_sve_whilele/INS_sve_whilels,INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, CreateWhileLessThanOrEqualMask32Bit, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_whilele/INS_sve_whilels,INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, CreateWhileLessThanOrEqualMask64Bit, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_whilele/INS_sve_whilels,INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, CreateWhileLessThanOrEqualMask8Bit, -1, 2, false, {INS_invalid, INS_sve_whilele/INS_sve_whilels,INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, Divide, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_sdiv, INS_sve_udiv, INS_sve_sdiv, INS_sve_udiv, INS_sve_fdiv, INS_sve_fdiv}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, DotProduct, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_sdot, INS_sve_udot, INS_sve_sdot, INS_sve_udot, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve, DotProductBySelectedScalar, -1, 4, true, {INS_invalid, INS_invalid, INS_invalid, 
INS_invalid, INS_sve_sdot, INS_sve_udot, INS_sve_sdot, INS_sve_udot, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve, DuplicateSelectedScalarToVector, -1, 2, true, {INS_sve_dup/INS_sve_tbl,INS_sve_dup/INS_sve_tbl,INS_sve_dup/INS_sve_tbl,INS_sve_dup/INS_sve_tbl,INS_sve_dup/INS_sve_tbl,INS_sve_dup/INS_sve_tbl,INS_sve_dup/INS_sve_tbl,INS_sve_dup/INS_sve_tbl,INS_sve_dup/INS_sve_tbl,INS_sve_dup/INS_sve_tbl},HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ExtractAfterLastScalar, -1, 1, true, {INS_sve_lasta, INS_sve_lasta, INS_sve_lasta, INS_sve_lasta, INS_sve_lasta, INS_sve_lasta, INS_sve_lasta, INS_sve_lasta, INS_sve_lasta, INS_sve_lasta}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ExtractAfterLastVector, -1, 1, true, {INS_sve_lasta, INS_sve_lasta, INS_sve_lasta, INS_sve_lasta, INS_sve_lasta, INS_sve_lasta, INS_sve_lasta, INS_sve_lasta, INS_sve_lasta, INS_sve_lasta}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ExtractLastScalar, -1, 1, true, {INS_sve_lastb, INS_sve_lastb, INS_sve_lastb, INS_sve_lastb, INS_sve_lastb, INS_sve_lastb, INS_sve_lastb, INS_sve_lastb, INS_sve_lastb, INS_sve_lastb}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ExtractLastVector, -1, 1, true, {INS_sve_lastb, INS_sve_lastb, INS_sve_lastb, INS_sve_lastb, INS_sve_lastb, INS_sve_lastb, INS_sve_lastb, INS_sve_lastb, INS_sve_lastb, INS_sve_lastb}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ExtractVector, -1, 3, true, {INS_sve_ext, INS_sve_ext, INS_sve_ext, INS_sve_ext, INS_sve_ext, INS_sve_ext, INS_sve_ext, INS_sve_ext, INS_sve_ext, INS_sve_ext}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, FloatingPointExponentialAccelerator, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fexpa, INS_sve_fexpa}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve, FusedMultiplyAdd, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmla, INS_sve_fmla}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, FusedMultiplyAddBySelectedScalar, -1, 4, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmla, INS_sve_fmla}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, FusedMultiplyAddNegated, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fnmla, INS_sve_fnmla}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, FusedMultiplySubtract, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmls, INS_sve_fmls}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, FusedMultiplySubtractBySelectedScalar, -1, 4, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmls, INS_sve_fmls}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, FusedMultiplySubtractNegated, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fnmls, INS_sve_fnmls}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, GatherPrefetch16Bit, -1, 4, true, {INS_invalid, INS_invalid, INS_sve_prfh, INS_sve_prfh, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, 
HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherPrefetch32Bit, -1, 4, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_prfw, INS_sve_prfw, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherPrefetch64Bit, -1, 4, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_prfd, INS_sve_prfd, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherPrefetch8Bit, -1, 4, true, {INS_sve_prfb, INS_sve_prfb, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVector, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1d, INS_sve_ld1d, INS_sve_ld1w, INS_sve_ld1d}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorByteZeroExtend, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1b, INS_sve_ld1b, INS_sve_ld1b, INS_sve_ld1b, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorByteZeroExtendFirstFaulting, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1b, INS_sve_ldff1b, INS_sve_ldff1b, INS_sve_ldff1b, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorFirstFaulting, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1w, INS_sve_ldff1w, INS_sve_ldff1d, INS_sve_ldff1d, INS_sve_ldff1w, INS_sve_ldff1d}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorInt16SignExtend, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sh, INS_sve_ld1sh, INS_sve_ld1sh, INS_sve_ld1sh, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorInt16SignExtendFirstFaulting, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1sh, INS_sve_ldff1sh, INS_sve_ldff1sh, INS_sve_ldff1sh, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorInt16WithByteOffsetsSignExtend, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sh, INS_sve_ld1sh, INS_sve_ld1sh, INS_sve_ld1sh, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1sh, INS_sve_ldff1sh, INS_sve_ldff1sh, INS_sve_ldff1sh, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorInt32SignExtend, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sw, INS_sve_ld1sw, INS_sve_ld1sw, INS_sve_ld1sw, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorInt32SignExtendFirstFaulting, -1, 3, true, {INS_invalid, INS_invalid, 
INS_invalid, INS_invalid, INS_sve_ldff1sw, INS_sve_ldff1sw, INS_sve_ldff1sw, INS_sve_ldff1sw, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorInt32WithByteOffsetsSignExtend, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sw, INS_sve_ld1sw, INS_sve_ld1sw, INS_sve_ld1sw, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1sw, INS_sve_ldff1sw, INS_sve_ldff1sw, INS_sve_ldff1sw, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorSByteSignExtend, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sb, INS_sve_ld1sb, INS_sve_ld1sb, INS_sve_ld1sb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorSByteSignExtendFirstFaulting, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1sb, INS_sve_ldff1sb, INS_sve_ldff1sb, INS_sve_ldff1sb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorUInt16WithByteOffsetsZeroExtend, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1h, INS_sve_ld1h, INS_sve_ld1h, INS_sve_ld1h, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1h, INS_sve_ldff1h, INS_sve_ldff1h, INS_sve_ldff1h, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorUInt16ZeroExtend, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1h, INS_sve_ld1h, INS_sve_ld1h, INS_sve_ld1h, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorUInt16ZeroExtendFirstFaulting, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1h, INS_sve_ldff1h, INS_sve_ldff1h, INS_sve_ldff1h, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorUInt32WithByteOffsetsZeroExtend, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1w, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1w, INS_sve_ldff1w, INS_sve_ldff1w, INS_sve_ldff1w, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorUInt32ZeroExtend, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1w, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorUInt32ZeroExtendFirstFaulting, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, 
INS_invalid, INS_sve_ldff1w, INS_sve_ldff1w, INS_sve_ldff1w, INS_sve_ldff1w, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorWithByteOffsetFirstFaulting, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1w, INS_sve_ldff1w, INS_sve_ldff1d, INS_sve_ldff1d, INS_sve_ldff1w, INS_sve_ldff1d}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GatherVectorWithByteOffsets, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1d, INS_sve_ld1d, INS_sve_ld1w, INS_sve_ld1d}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, GetActiveElementCount, -1, 2, true, {INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, GetFfr, -1, 0, true, {INS_sve_rdffr, INS_sve_rdffr, INS_sve_rdffr, INS_sve_rdffr, INS_sve_rdffr, INS_sve_rdffr, INS_sve_rdffr, INS_sve_rdffr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, InsertIntoShiftedVector, -1, 2, true, {INS_sve_insr, INS_sve_insr, INS_sve_insr, INS_sve_insr, INS_sve_insr, INS_sve_insr, INS_sve_insr, INS_sve_insr, INS_sve_insr, INS_sve_insr}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LeadingSignCount, -1, 1, true, {INS_invalid, INS_sve_cls, INS_invalid, INS_sve_cls, INS_invalid, INS_sve_cls, INS_invalid, INS_sve_cls, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve, LeadingZeroCount, -1, 1, true, {INS_invalid, INS_sve_clz, INS_invalid, INS_sve_clz, INS_invalid, INS_sve_clz, INS_invalid, INS_sve_clz, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, LoadVector, -1, 2, true, {INS_sve_ld1b, INS_sve_ld1b, INS_sve_ld1h, INS_sve_ld1h, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1d, INS_sve_ld1d, INS_sve_ld1w, INS_sve_ld1d}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVector128AndReplicateToVector, -1, 2, true, {INS_sve_ld1rqb, INS_sve_ld1rqb, INS_sve_ld1rqh, INS_sve_ld1rqh, INS_sve_ld1rqw, INS_sve_ld1rqw, INS_sve_ld1rqd, INS_sve_ld1rqd, INS_sve_ld1rqw, INS_sve_ld1rqd}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorByteNonFaultingZeroExtendToInt16, -1, 1, false, {INS_invalid, INS_invalid, INS_sve_ldnf1b, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorByteNonFaultingZeroExtendToInt32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1b, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorByteNonFaultingZeroExtendToInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1b, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorByteNonFaultingZeroExtendToUInt16, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1b, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorByteNonFaultingZeroExtendToUInt32, -1, 1, false, {INS_invalid, 
INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1b, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorByteNonFaultingZeroExtendToUInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1b, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorByteZeroExtendFirstFaulting, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_ldff1b, INS_sve_ldff1b, INS_sve_ldff1b, INS_sve_ldff1b, INS_sve_ldff1b, INS_sve_ldff1b, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorByteZeroExtendToInt16, -1, 2, false, {INS_invalid, INS_invalid, INS_sve_ld1b, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorByteZeroExtendToInt32, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1b, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorByteZeroExtendToInt64, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1b, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorByteZeroExtendToUInt16, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1b, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorByteZeroExtendToUInt32, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1b, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorByteZeroExtendToUInt64, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1b, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorFirstFaulting, -1, 2, true, {INS_sve_ldff1b, INS_sve_ldff1b, INS_sve_ldff1h, INS_sve_ldff1h, INS_sve_ldff1w, INS_sve_ldff1w, INS_sve_ldff1d, INS_sve_ldff1d, INS_sve_ldff1w, INS_sve_ldff1d}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorInt16NonFaultingSignExtendToInt32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1sh, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorInt16NonFaultingSignExtendToInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1sh, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorInt16NonFaultingSignExtendToUInt32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1sh, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorInt16NonFaultingSignExtendToUInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1sh, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorInt16SignExtendFirstFaulting, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1sh, INS_sve_ldff1sh, 
INS_sve_ldff1sh, INS_sve_ldff1sh, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorInt16SignExtendToInt32, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sh, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorInt16SignExtendToInt64, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sh, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorInt16SignExtendToUInt32, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sh, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorInt16SignExtendToUInt64, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sh, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorInt32NonFaultingSignExtendToInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1sw, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorInt32NonFaultingSignExtendToUInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1sw, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorInt32SignExtendFirstFaulting, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1sw, INS_sve_ldff1sw, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorInt32SignExtendToInt64, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sw, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorInt32SignExtendToUInt64, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sw, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorNonFaulting, -1, 1, true, {INS_sve_ldnf1b, INS_sve_ldnf1b, INS_sve_ldnf1h, INS_sve_ldnf1h, INS_sve_ldnf1w, INS_sve_ldnf1w, INS_sve_ldnf1d, INS_sve_ldnf1d, INS_sve_ldnf1w, INS_sve_ldnf1d}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorNonTemporal, -1, 2, true, {INS_sve_ldnt1b, INS_sve_ldnt1b, INS_sve_ldnt1h, INS_sve_ldnt1h, INS_sve_ldnt1w, INS_sve_ldnt1w, INS_sve_ldnt1d, INS_sve_ldnt1d, INS_sve_ldnt1w, INS_sve_ldnt1d}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorSByteNonFaultingSignExtendToInt16, -1, 1, false, {INS_invalid, INS_invalid, INS_sve_ldnf1sb, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorSByteNonFaultingSignExtendToInt32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1sb, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorSByteNonFaultingSignExtendToInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1sb, INS_invalid, INS_invalid, INS_invalid}, 
HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorSByteNonFaultingSignExtendToUInt16, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1sb, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorSByteNonFaultingSignExtendToUInt32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1sb, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorSByteNonFaultingSignExtendToUInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1sb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorSByteSignExtendFirstFaulting, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_ldff1sb, INS_sve_ldff1sb, INS_sve_ldff1sb, INS_sve_ldff1sb, INS_sve_ldff1sb, INS_sve_ldff1sb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorSByteSignExtendToInt16, -1, 2, false, {INS_invalid, INS_invalid, INS_sve_ld1sb, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorSByteSignExtendToInt32, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sb, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorSByteSignExtendToInt64, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sb, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorSByteSignExtendToUInt16, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sb, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorSByteSignExtendToUInt32, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sb, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorSByteSignExtendToUInt64, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1sb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorUInt16NonFaultingZeroExtendToInt32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1h, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorUInt16NonFaultingZeroExtendToInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1h, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorUInt16NonFaultingZeroExtendToUInt32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1h, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorUInt16NonFaultingZeroExtendToUInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1h, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) 
+HARDWARE_INTRINSIC(Sve, LoadVectorUInt16ZeroExtendFirstFaulting, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1h, INS_sve_ldff1h, INS_sve_ldff1h, INS_sve_ldff1h, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorUInt16ZeroExtendToInt32, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1h, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorUInt16ZeroExtendToInt64, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1h, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorUInt16ZeroExtendToUInt32, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1h, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorUInt16ZeroExtendToUInt64, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1h, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorUInt32NonFaultingZeroExtendToInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1w, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorUInt32NonFaultingZeroExtendToUInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1w, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorUInt32ZeroExtendFirstFaulting, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldff1w, INS_sve_ldff1w, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorUInt32ZeroExtendToInt64, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1w, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorUInt32ZeroExtendToUInt64, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1w, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, LoadVectorx2, -1, 2, true, {INS_sve_ld2b, INS_sve_ld2b, INS_sve_ld2h, INS_sve_ld2h, INS_sve_ld2w, INS_sve_ld2w, INS_sve_ld2d, INS_sve_ld2d, INS_sve_ld2w, INS_sve_ld2d}, HW_Category_SIMD, HW_Flag_MultiReg) +HARDWARE_INTRINSIC(Sve, LoadVectorx3, -1, 2, true, {INS_sve_ld3b, INS_sve_ld3b, INS_sve_ld3h, INS_sve_ld3h, INS_sve_ld3w, INS_sve_ld3w, INS_sve_ld3d, INS_sve_ld3d, INS_sve_ld3w, INS_sve_ld3d}, HW_Category_SIMD, HW_Flag_MultiReg) +HARDWARE_INTRINSIC(Sve, LoadVectorx4, -1, 2, true, {INS_sve_ld4b, INS_sve_ld4b, INS_sve_ld4h, INS_sve_ld4h, INS_sve_ld4w, INS_sve_ld4w, INS_sve_ld4d, INS_sve_ld4d, INS_sve_ld4w, INS_sve_ld4d}, HW_Category_SIMD, HW_Flag_MultiReg) +HARDWARE_INTRINSIC(Sve, Max, -1, 2, true, {INS_sve_smax, INS_sve_umax, INS_sve_smax, INS_sve_umax, INS_sve_smax, INS_sve_umax, INS_sve_smax, INS_sve_umax, INS_sve_fmax, INS_sve_fmax}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, MaxAcross, -1, 1, true, {INS_sve_smaxv, INS_sve_umaxv, INS_sve_smaxv, INS_sve_umaxv, INS_sve_smaxv, INS_sve_umaxv, INS_sve_smaxv, INS_sve_umaxv, 
INS_sve_fmaxv, INS_sve_fmaxv}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, MaxNumber, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmaxnm, INS_sve_fmaxnm}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, MaxNumberAcross, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmaxnmv, INS_sve_fmaxnmv}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, Min, -1, 2, true, {INS_sve_smin, INS_sve_umin, INS_sve_smin, INS_sve_umin, INS_sve_smin, INS_sve_umin, INS_sve_smin, INS_sve_umin, INS_sve_fmin, INS_sve_fmin}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, MinAcross, -1, 1, true, {INS_sve_sminv, INS_sve_uminv, INS_sve_sminv, INS_sve_uminv, INS_sve_sminv, INS_sve_uminv, INS_sve_sminv, INS_sve_uminv, INS_sve_fminv, INS_sve_fminv}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, MinNumber, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fminnm, INS_sve_fminnm}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, MinNumberAcross, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fminnmv, INS_sve_fminnmv}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, Multiply, -1, 2, true, {INS_sve_mul, INS_sve_mul, INS_sve_mul, INS_sve_mul, INS_sve_mul, INS_sve_mul, INS_sve_mul, INS_sve_mul, INS_sve_fmul, INS_sve_fmul}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, MultiplyAdd, -1, 3, true, {INS_sve_mla, INS_sve_mla, INS_sve_mla, INS_sve_mla, INS_sve_mla, INS_sve_mla, INS_sve_mla, INS_sve_mla, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, MultiplyAddRotateComplex, -1, 4, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcmla, INS_sve_fcmla}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, MultiplyAddRotateComplexBySelectedScalar, -1, 5, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcmla, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, MultiplyBySelectedScalar, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmul, INS_sve_fmul}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, MultiplyExtended, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmulx, INS_sve_fmulx}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, MultiplySubtract, -1, 3, true, {INS_sve_mls, INS_sve_mls, INS_sve_mls, INS_sve_mls, INS_sve_mls, INS_sve_mls, INS_sve_mls, INS_sve_mls, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, Negate, -1, 1, true, {INS_sve_neg, INS_invalid, INS_sve_neg, INS_invalid, INS_sve_neg, INS_invalid, INS_sve_neg, INS_invalid, INS_sve_fneg, INS_sve_fneg}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, Not, -1, 1, true, {INS_sve_not, INS_sve_not, INS_sve_not, INS_sve_not, INS_sve_not, INS_sve_not, INS_sve_not, INS_sve_not, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, Or, -1, 2, true, {INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_sve_orr, 
INS_sve_orr, INS_sve_orr, INS_sve_orr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, OrAcross, -1, 1, true, {INS_sve_orv, INS_sve_orv, INS_sve_orv, INS_sve_orv, INS_sve_orv, INS_sve_orv, INS_sve_orv, INS_sve_orv, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, OrNot, -1, 2, true, {INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_sve_nor, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, PopCount, -1, 1, true, {INS_invalid, INS_sve_cnt, INS_invalid, INS_sve_cnt, INS_invalid, INS_sve_cnt, INS_invalid, INS_sve_cnt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, PrefetchBytes, -1, 3, false, {INS_invalid, INS_sve_prfb, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, PrefetchInt16, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_sve_prfh, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, PrefetchInt32, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_prfw, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, PrefetchInt64, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_prfd, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ReciprocalEstimate, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_frecpe, INS_sve_frecpe}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ReciprocalExponent, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_frecpx, INS_sve_frecpx}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ReciprocalSqrtEstimate, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_frsqrte, INS_sve_frsqrte}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ReciprocalSqrtStep, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_frsqrts, INS_sve_frsqrts}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ReciprocalStep, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_frecps, INS_sve_frecps}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ReverseBits, -1, 1, true, {INS_sve_rbit, INS_sve_rbit, INS_sve_rbit, INS_sve_rbit, INS_sve_rbit, INS_sve_rbit, INS_sve_rbit, INS_sve_rbit, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ReverseElement, -1, 1, true, {INS_sve_rev, INS_sve_rev, INS_sve_rev, INS_sve_rev, INS_sve_rev, INS_sve_rev, INS_sve_rev, INS_sve_rev, INS_sve_rev, INS_sve_rev}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ReverseElement16, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_revh, INS_sve_revh, INS_sve_revh, INS_sve_revh, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ReverseElement32, -1, 1, true, {INS_invalid, 
INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_revw, INS_sve_revw, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ReverseElement8, -1, 1, true, {INS_invalid, INS_invalid, INS_sve_revb, INS_sve_revb, INS_sve_revb, INS_sve_revb, INS_sve_revb, INS_sve_revb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, RoundAwayFromZero, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_frinta, INS_sve_frinta}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, RoundToNearest, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_frintn, INS_sve_frintn}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, RoundToNegativeInfinity, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_frintm, INS_sve_frintm}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, RoundToPositiveInfinity, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_frintp, INS_sve_frintp}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, RoundToZero, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_frintz, INS_sve_frintz}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, SaturatingDecrementBy16BitElementCount, -1, 3, true, {INS_invalid, INS_invalid, INS_sve_sqdech, INS_sve_uqdech, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, SaturatingDecrementBy32BitElementCount, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_sqdecw, INS_sve_uqdecw, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, SaturatingDecrementBy64BitElementCount, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_sqdecd, INS_sve_uqdecd, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, SaturatingDecrementBy8BitElementCount, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, SaturatingDecrementByActiveElementCount, -1, 2, true, {INS_invalid, INS_sve_sqdecp/INS_sve_uqdecp,INS_sve_sqdecp, INS_sve_sqdecp/INS_sve_uqdecp,INS_sve_sqdecp, INS_sve_sqdecp/INS_sve_uqdecp,INS_sve_sqdecp, INS_sve_sqdecp/INS_sve_uqdecp,INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, SaturatingIncrementBy16BitElementCount, -1, 3, true, {INS_invalid, INS_invalid, INS_sve_sqinch, INS_sve_uqinch, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, SaturatingIncrementBy32BitElementCount, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_sqincw, INS_sve_uqincw, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, SaturatingIncrementBy64BitElementCount, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_sqincd, INS_sve_uqincd, INS_invalid, 
INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, SaturatingIncrementBy8BitElementCount, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, SaturatingIncrementByActiveElementCount, -1, 2, true, {INS_invalid, INS_sve_sqincp/INS_sve_uqincp,INS_sve_sqincp, INS_sve_sqincp/INS_sve_uqincp,INS_sve_sqincp, INS_sve_sqincp/INS_sve_uqincp,INS_sve_sqincp, INS_sve_sqincp/INS_sve_uqincp,INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, Scale, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fscale, INS_sve_fscale}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve, Scatter, -1, 4, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_st1w, INS_sve_st1w, INS_sve_st1d, INS_sve_st1d, INS_sve_st1w, INS_sve_st1d}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, Scatter16BitNarrowing, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_st1h, INS_sve_st1h, INS_sve_st1h, INS_sve_st1h, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve, Scatter16BitWithByteOffsetsNarrowing, -1, 4, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_st1h, INS_sve_st1h, INS_sve_st1h, INS_sve_st1h, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, Scatter32BitNarrowing, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_st1w, INS_sve_st1w, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve, Scatter32BitWithByteOffsetsNarrowing, -1, 4, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_st1w, INS_sve_st1w, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, Scatter8BitNarrowing, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_st1b, INS_sve_st1b, INS_sve_st1b, INS_sve_st1b, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve, Scatter8BitWithByteOffsetsNarrowing, -1, 4, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_st1b, INS_sve_st1b, INS_sve_st1b, INS_sve_st1b, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, SetFfr, -1, 1, true, {INS_sve_wrffr, INS_sve_wrffr, INS_sve_wrffr, INS_sve_wrffr, INS_sve_wrffr, INS_sve_wrffr, INS_sve_wrffr, INS_sve_wrffr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ShiftLeftLogical, -1, 2, true, {INS_sve_lsl, INS_sve_lsl, INS_sve_lsl, INS_sve_lsl, INS_sve_lsl, INS_sve_lsl, INS_sve_lsl, INS_sve_lsl, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, ShiftRightArithmetic, -1, 2, true, {INS_sve_asr, INS_invalid, INS_sve_asr, INS_invalid, INS_sve_asr, INS_invalid, INS_sve_asr, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, ShiftRightArithmeticForDivide, -1, 2, true, {INS_sve_asrd, INS_invalid, INS_sve_asrd, 
INS_invalid, INS_sve_asrd, INS_invalid, INS_sve_asrd, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ShiftRightLogical, -1, 2, true, {INS_invalid, INS_sve_lsr, INS_invalid, INS_sve_lsr, INS_invalid, INS_sve_lsr, INS_invalid, INS_sve_lsr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, SignExtend16, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_sxth, INS_invalid, INS_sve_sxth, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, SignExtend32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_sxtw, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, SignExtend8, -1, 1, true, {INS_invalid, INS_invalid, INS_sve_sxtb, INS_invalid, INS_sve_sxtb, INS_invalid, INS_sve_sxtb, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, SignExtendWideningLower, -1, 1, true, {INS_invalid, INS_invalid, INS_sve_sunpklo, INS_invalid, INS_sve_sunpklo, INS_invalid, INS_sve_sunpklo, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve, SignExtendWideningUpper, -1, 1, true, {INS_invalid, INS_invalid, INS_sve_sunpkhi, INS_invalid, INS_sve_sunpkhi, INS_invalid, INS_sve_sunpkhi, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve, Splice, -1, 3, true, {INS_sve_splice, INS_sve_splice, INS_sve_splice, INS_sve_splice, INS_sve_splice, INS_sve_splice, INS_sve_splice, INS_sve_splice, INS_sve_splice, INS_sve_splice}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, Sqrt, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fsqrt, INS_sve_fsqrt}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, Store, -1, 3, true, {INS_sve_st1b/INS_sve_st2b/INS_sve_st3b/INS_sve_st4b,INS_sve_st1b/INS_sve_st2b/INS_sve_st3b/INS_sve_st4b,INS_sve_st1h/INS_sve_st2h/INS_sve_st3h/INS_sve_st4h,INS_sve_st1h/INS_sve_st2h/INS_sve_st3h/INS_sve_st4h,INS_sve_st1w/INS_sve_st2w/INS_sve_st3w/INS_sve_st4w,INS_sve_st1w/INS_sve_st2w/INS_sve_st3w/INS_sve_st4w,INS_sve_st1d/INS_sve_st2d/INS_sve_st3d/INS_sve_st4d,INS_sve_st1d/INS_sve_st2d/INS_sve_st3d/INS_sve_st4d,INS_sve_st1w/INS_sve_st2w/INS_sve_st3w/INS_sve_st4w,INS_sve_st1d/INS_sve_st2d/INS_sve_st3d/INS_sve_st4d},HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, StoreNarrowing, -1, 3, true, {INS_invalid, INS_invalid, INS_sve_st1b, INS_sve_st1b, INS_sve_st1b/INS_sve_st1h,INS_sve_st1b/INS_sve_st1h,INS_sve_st1b/INS_sve_st1h/INS_sve_st1w,INS_sve_st1b/INS_sve_st1h/INS_sve_st1w,INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve, StoreNonTemporal, -1, 3, true, {INS_sve_stnt1b, INS_sve_stnt1b, INS_sve_stnt1h, INS_sve_stnt1h, INS_sve_stnt1w, INS_sve_stnt1w, INS_sve_stnt1d, INS_sve_stnt1d, INS_sve_stnt1w, INS_sve_stnt1d}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, Subtract, -1, 2, true, {INS_sve_sub, INS_sve_sub, INS_sve_sub, INS_sve_sub, INS_sve_sub, INS_sve_sub, INS_sve_sub, INS_sve_sub, INS_sve_fsub, INS_sve_fsub}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, SubtractSaturate, -1, 2, true, {INS_sve_sqsub, INS_sve_uqsub, INS_sve_sqsub, INS_sve_uqsub, INS_sve_sqsub, INS_sve_uqsub, 
INS_sve_sqsub, INS_sve_uqsub, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, TestAnyTrue, -1, 2, true, {INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, TestFirstTrue, -1, 2, true, {INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, TestLastTrue, -1, 2, true, {INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_sve_ptest, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, TransposeEven, -1, 2, true, {INS_sve_trn1, INS_sve_trn1, INS_sve_trn1, INS_sve_trn1, INS_sve_trn1, INS_sve_trn1, INS_sve_trn1, INS_sve_trn1, INS_sve_trn1, INS_sve_trn1}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, TransposeOdd, -1, 2, true, {INS_sve_trn2, INS_sve_trn2, INS_sve_trn2, INS_sve_trn2, INS_sve_trn2, INS_sve_trn2, INS_sve_trn2, INS_sve_trn2, INS_sve_trn2, INS_sve_trn2}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, TrigonometricMultiplyAddCoefficient, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ftmad, INS_sve_ftmad}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, TrigonometricSelectCoefficient, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ftssel, INS_sve_ftssel}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve, TrigonometricStartingValue, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ftsmul, INS_sve_ftsmul}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve, UnzipEven, -1, 2, true, {INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, UnzipOdd, -1, 2, true, {INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, VectorTableLookup, -1, 2, true, {INS_sve_tbl, INS_sve_tbl, INS_sve_tbl, INS_sve_tbl, INS_sve_tbl, INS_sve_tbl, INS_sve_tbl, INS_sve_tbl, INS_sve_tbl, INS_sve_tbl}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve, Xor, -1, 2, true, {INS_sve_eor, INS_sve_eor, INS_sve_eor, INS_sve_eor, INS_sve_eor, INS_sve_eor, INS_sve_eor, INS_sve_eor, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, XorAcross, -1, 1, true, {INS_sve_eorv, INS_sve_eorv, INS_sve_eorv, INS_sve_eorv, INS_sve_eorv, INS_sve_eorv, INS_sve_eorv, INS_sve_eorv, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ZeroExtend16, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_uxth, INS_invalid, INS_sve_uxth, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve, ZeroExtend32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_uxtw, INS_invalid, INS_invalid}, HW_Category_SIMD, 
HW_Flag_NoFlag)
+HARDWARE_INTRINSIC(Sve, ZeroExtend8, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_sve_uxtb, INS_invalid, INS_sve_uxtb, INS_invalid, INS_sve_uxtb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag)
+HARDWARE_INTRINSIC(Sve, ZeroExtendWideningLower, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_sve_uunpklo, INS_invalid, INS_sve_uunpklo, INS_invalid, INS_sve_uunpklo, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve, ZeroExtendWideningUpper, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_sve_uunpkhi, INS_invalid, INS_sve_uunpkhi, INS_invalid, INS_sve_uunpkhi, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve, ZipHigh, -1, 2, true, {INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2}, HW_Category_SIMD, HW_Flag_NoFlag)
+HARDWARE_INTRINSIC(Sve, ZipLow, -1, 2, true, {INS_sve_zip1, INS_sve_zip1, INS_sve_zip1, INS_sve_zip1, INS_sve_zip1, INS_sve_zip1, INS_sve_zip1, INS_sve_zip1, INS_sve_zip1, INS_sve_zip1}, HW_Category_SIMD, HW_Flag_NoFlag)
+
+
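Each HARDWARE_INTRINSIC row above supplies one instruction per base type, in the column order given in the header that follows (TYP_BYTE through TYP_DOUBLE); INS_invalid marks a base type for which the intrinsic is not exposed, and compound slots written as a/b (for example INS_sve_dup/INS_sve_tbl) mean codegen selects between alternative encodings depending on the operand form. A minimal C sketch of how such a row is indexed by base type is shown below; the enum and helper names are illustrative assumptions, not the actual JIT code (the runtime performs this lookup through its HWIntrinsicInfo tables).

#include <stdio.h>

/* Minimal sketch, assuming hypothetical names: each table row is an array of
 * ten instruction ids indexed by base type, matching the header order. */
typedef enum { INS_invalid, INS_sve_smax, INS_sve_umax, INS_sve_fmax } ins_t;
typedef enum { TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT,
               TYP_UINT, TYP_LONG, TYP_ULONG, TYP_FLOAT, TYP_DOUBLE } base_t;

/* The row for Sve.Max, transcribed from the table above: signed types map to
 * smax, unsigned types to umax, floating-point types to fmax. */
static const ins_t sveMaxRow[10] = {
    INS_sve_smax, INS_sve_umax,  /* byte,  ubyte  */
    INS_sve_smax, INS_sve_umax,  /* short, ushort */
    INS_sve_smax, INS_sve_umax,  /* int,   uint   */
    INS_sve_smax, INS_sve_umax,  /* long,  ulong  */
    INS_sve_fmax, INS_sve_fmax   /* float, double */
};

/* INS_invalid in a slot means the intrinsic is not supported for that type. */
static ins_t lookupIns(const ins_t row[10], base_t baseType)
{
    return row[baseType];
}

int main(void)
{
    printf("Sve.Max on uint -> ins id %d\n", (int)lookupIns(sveMaxRow, TYP_UINT));
    return 0;
}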
+// ***************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************
+// ISA Function name SIMD size NumArg EncodesExtraTypeArg Instructions Category Flags
+// {TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT, TYP_UINT, TYP_LONG, TYP_ULONG, TYP_FLOAT, TYP_DOUBLE}
+// ***************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************
+// Sve2
+HARDWARE_INTRINSIC(Sve2, AbsoluteDifferenceAdd, -1, 3, true, {INS_sve_saba, INS_sve_uaba, INS_sve_saba, INS_sve_uaba, INS_sve_saba, INS_sve_uaba, INS_sve_saba, INS_sve_uaba, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag)
+HARDWARE_INTRINSIC(Sve2, AbsoluteDifferenceAddWideningLower, -1, 3, true, {INS_invalid, INS_invalid, INS_sve_sabalb, INS_sve_uabalb, INS_sve_sabalb, INS_sve_uabalb, INS_sve_sabalb, INS_sve_uabalb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve2, AbsoluteDifferenceAddWideningUpper, -1, 3, true, {INS_invalid, INS_invalid, INS_sve_sabalt, INS_sve_uabalt, INS_sve_sabalt, INS_sve_uabalt, INS_sve_sabalt, INS_sve_uabalt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve2, AbsoluteDifferenceWideningLower, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_sabdlb, INS_sve_uabdlb, INS_sve_sabdlb, INS_sve_uabdlb, INS_sve_sabdlb, INS_sve_uabdlb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve2, AbsoluteDifferenceWideningUpper, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_sabdlt, INS_sve_uabdlt, INS_sve_sabdlt, INS_sve_uabdlt, INS_sve_sabdlt, INS_sve_uabdlt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve2, AddCarryWideningLower, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_adclb, INS_invalid, INS_sve_adclb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag)
+HARDWARE_INTRINSIC(Sve2, AddCarryWideningUpper, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_adclt, INS_invalid, INS_sve_adclt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag)
+HARDWARE_INTRINSIC(Sve2, AddHighNarrowingLower, -1, 2, true, {INS_sve_addhnb, INS_sve_addhnb, INS_sve_addhnb, INS_sve_addhnb, INS_sve_addhnb, INS_sve_addhnb, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve2, AddHighNarrowingUpper, -1, 3, true, {INS_sve_addhnt, INS_sve_addhnt, INS_sve_addhnt, INS_sve_addhnt, INS_sve_addhnt, INS_sve_addhnt, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve2, AddPairwise, -1, 2, true, {INS_sve_addp, INS_sve_addp, INS_sve_addp, INS_sve_addp, INS_sve_addp, INS_sve_addp, INS_sve_addp, INS_sve_addp, INS_sve_faddp, INS_sve_faddp}, HW_Category_SIMD, HW_Flag_NoFlag)
+HARDWARE_INTRINSIC(Sve2, AddPairwiseWidening, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_sadalp, INS_sve_uadalp, INS_sve_sadalp, INS_sve_uadalp, INS_sve_sadalp, INS_sve_uadalp, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve2, AddRotateComplex, -1, 3, true, {INS_sve_cadd, INS_sve_cadd, INS_sve_cadd, INS_sve_cadd, INS_sve_cadd, INS_sve_cadd, INS_sve_cadd, INS_sve_cadd, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag)
+HARDWARE_INTRINSIC(Sve2, AddSaturate, -1, 2, true, {INS_sve_sqadd, INS_sve_uqadd, INS_sve_sqadd, INS_sve_uqadd, INS_sve_sqadd, INS_sve_uqadd, INS_sve_sqadd, INS_sve_uqadd, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag)
+HARDWARE_INTRINSIC(Sve2, AddSaturateWithSignedAddend, -1, 2, true, {INS_invalid, INS_sve_usqadd, INS_invalid, INS_sve_usqadd, INS_invalid, INS_sve_usqadd, INS_invalid, INS_sve_usqadd, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve2, AddSaturateWithUnsignedAddend, -1, 2, true, {INS_sve_suqadd, INS_invalid, INS_sve_suqadd, INS_invalid, INS_sve_suqadd, INS_invalid, INS_sve_suqadd, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve2, AddWideLower, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_saddwb, INS_sve_uaddwb, INS_sve_saddwb, INS_sve_uaddwb, INS_sve_saddwb, INS_sve_uaddwb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve2, AddWideUpper, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_saddwt, INS_sve_uaddwt, INS_sve_saddwt, INS_sve_uaddwt, INS_sve_saddwt, INS_sve_uaddwt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve2, AddWideningLower, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_saddlb, INS_sve_uaddlb, INS_sve_saddlb, INS_sve_uaddlb, INS_sve_saddlb, INS_sve_uaddlb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve2, AddWideningLowerUpper, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_saddlbt, INS_invalid, INS_sve_saddlbt, INS_invalid, INS_sve_saddlbt, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg)
+HARDWARE_INTRINSIC(Sve2, AddWideningUpper, -1, 2, true,
{INS_invalid, INS_invalid, INS_sve_saddlt, INS_sve_uaddlt, INS_sve_saddlt, INS_sve_uaddlt, INS_sve_saddlt, INS_sve_uaddlt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, BitwiseClearXor, -1, 3, true, {INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, BitwiseSelect, -1, 3, true, {INS_sve_bsl, INS_sve_bsl, INS_sve_bsl, INS_sve_bsl, INS_sve_bsl, INS_sve_bsl, INS_sve_bsl, INS_sve_bsl, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, BitwiseSelectLeftInverted, -1, 3, true, {INS_sve_bsl1n, INS_sve_bsl1n, INS_sve_bsl1n, INS_sve_bsl1n, INS_sve_bsl1n, INS_sve_bsl1n, INS_sve_bsl1n, INS_sve_bsl1n, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, BitwiseSelectRightInverted, -1, 3, true, {INS_sve_bsl2n, INS_sve_bsl2n, INS_sve_bsl2n, INS_sve_bsl2n, INS_sve_bsl2n, INS_sve_bsl2n, INS_sve_bsl2n, INS_sve_bsl2n, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, CountMatchingElements, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_histcnt, INS_invalid, INS_sve_histcnt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, CountMatchingElementsIn128BitSegments, -1, 2, false, {INS_invalid, INS_sve_histseg, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, CreateWhileGreaterThanMask, -1, 2, true, {INS_invalid, INS_sve_whilegt/INS_sve_whilehi,INS_invalid, INS_sve_whilegt/INS_sve_whilehi,INS_invalid, INS_sve_whilegt/INS_sve_whilehi,INS_invalid, INS_sve_whilegt/INS_sve_whilehi,INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, CreateWhileGreaterThanOrEqualMask, -1, 2, true, {INS_invalid, INS_sve_whilege/INS_sve_whilehs,INS_invalid, INS_sve_whilege/INS_sve_whilehs,INS_invalid, INS_sve_whilege/INS_sve_whilehs,INS_invalid, INS_sve_whilege/INS_sve_whilehs,INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, CreateWhileReadAfterWriteMask, -1, 2, true, {INS_sve_whilerw, INS_sve_whilerw, INS_sve_whilerw, INS_sve_whilerw, INS_sve_whilerw, INS_sve_whilerw, INS_sve_whilerw, INS_sve_whilerw, INS_sve_whilerw, INS_sve_whilerw}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, CreateWhileWriteAfterReadMask, -1, 2, true, {INS_sve_whilewr, INS_sve_whilewr, INS_sve_whilewr, INS_sve_whilewr, INS_sve_whilewr, INS_sve_whilewr, INS_sve_whilewr, INS_sve_whilewr, INS_sve_whilewr, INS_sve_whilewr}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, DotProductComplex, -1, 5, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_cdot, INS_invalid, INS_sve_cdot, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, DownConvertNarrowingUpper, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvtnt, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, DownConvertRoundingOdd, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, 
INS_invalid, INS_invalid, INS_sve_fcvtx, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, DownConvertRoundingOddUpper, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvtxnt, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, GatherVectorByteZeroExtendNonTemporal, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1b, INS_sve_ldnt1b, INS_sve_ldnt1b, INS_sve_ldnt1b, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, GatherVectorInt16SignExtendNonTemporal, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1sh, INS_sve_ldnt1sh, INS_sve_ldnt1sh, INS_sve_ldnt1sh, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, GatherVectorInt16WithByteOffsetsSignExtendNonTemporal, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1sh, INS_sve_ldnt1sh, INS_sve_ldnt1sh, INS_sve_ldnt1sh, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, GatherVectorInt32SignExtendNonTemporal, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1sw, INS_sve_ldnt1sw, INS_sve_ldnt1sw, INS_sve_ldnt1sw, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, GatherVectorInt32WithByteOffsetsSignExtendNonTemporal, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1sw, INS_sve_ldnt1sw, INS_sve_ldnt1sw, INS_sve_ldnt1sw, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, GatherVectorNonTemporal, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1w, INS_sve_ldnt1w, INS_sve_ldnt1d, INS_sve_ldnt1d, INS_sve_ldnt1w, INS_sve_ldnt1d}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, GatherVectorSByteSignExtendNonTemporal, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1sb, INS_sve_ldnt1sb, INS_sve_ldnt1sb, INS_sve_ldnt1sb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1h, INS_sve_ldnt1h, INS_sve_ldnt1h, INS_sve_ldnt1h, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, GatherVectorUInt16ZeroExtendNonTemporal, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1h, INS_sve_ldnt1h, INS_sve_ldnt1h, INS_sve_ldnt1h, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1w, INS_sve_ldnt1w, INS_sve_ldnt1w, INS_sve_ldnt1w, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, GatherVectorUInt32ZeroExtendNonTemporal, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnt1w, INS_sve_ldnt1w, INS_sve_ldnt1w, 
INS_sve_ldnt1w, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, HalvingAdd, -1, 2, true, {INS_sve_shadd, INS_sve_uhadd, INS_sve_shadd, INS_sve_uhadd, INS_sve_shadd, INS_sve_uhadd, INS_sve_shadd, INS_sve_uhadd, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, HalvingSubtract, -1, 2, true, {INS_sve_shsub, INS_sve_uhsub, INS_sve_shsub, INS_sve_uhsub, INS_sve_shsub, INS_sve_uhsub, INS_sve_shsub, INS_sve_uhsub, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, HalvingSubtractReversed, -1, 2, true, {INS_sve_shsubr, INS_sve_uhsubr, INS_sve_shsubr, INS_sve_uhsubr, INS_sve_shsubr, INS_sve_uhsubr, INS_sve_shsubr, INS_sve_uhsubr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, InterleavingXorLowerUpper, -1, 3, true, {INS_sve_eorbt, INS_sve_eorbt, INS_sve_eorbt, INS_sve_eorbt, INS_sve_eorbt, INS_sve_eorbt, INS_sve_eorbt, INS_sve_eorbt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, InterleavingXorUpperLower, -1, 3, true, {INS_sve_eortb, INS_sve_eortb, INS_sve_eortb, INS_sve_eortb, INS_sve_eortb, INS_sve_eortb, INS_sve_eortb, INS_sve_eortb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, Log2, -1, 1, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_flogb, INS_invalid, INS_sve_flogb, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, Match, -1, 3, true, {INS_sve_match, INS_sve_match, INS_sve_match, INS_sve_match, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, MaxNumberPairwise, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmaxnmp, INS_sve_fmaxnmp}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, MaxPairwise, -1, 2, true, {INS_sve_smaxp, INS_sve_umaxp, INS_sve_smaxp, INS_sve_umaxp, INS_sve_smaxp, INS_sve_umaxp, INS_sve_smaxp, INS_sve_umaxp, INS_sve_fmaxp, INS_sve_fmaxp}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, MinNumberPairwise, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fminnmp, INS_sve_fminnmp}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, MinPairwise, -1, 2, true, {INS_sve_sminp, INS_sve_uminp, INS_sve_sminp, INS_sve_uminp, INS_sve_sminp, INS_sve_uminp, INS_sve_sminp, INS_sve_uminp, INS_sve_fminp, INS_sve_fminp}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, MoveWideningLower, -1, 1, true, {INS_invalid, INS_invalid, INS_sve_sshllb, INS_sve_ushllb, INS_sve_sshllb, INS_sve_ushllb, INS_sve_sshllb, INS_sve_ushllb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, MoveWideningUpper, -1, 1, true, {INS_invalid, INS_invalid, INS_sve_sshllt, INS_sve_ushllt, INS_sve_sshllt, INS_sve_ushllt, INS_sve_sshllt, INS_sve_ushllt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, MultiplyAddBySelectedScalar, -1, 4, true, {INS_invalid, INS_invalid, INS_sve_mla, INS_sve_mla, INS_sve_mla, INS_sve_mla, INS_sve_mla, INS_sve_mla, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, MultiplyAddRotateComplex, -1, 4, true, {INS_sve_cmla, 
INS_sve_cmla, INS_sve_cmla, INS_sve_cmla, INS_sve_cmla, INS_sve_cmla, INS_sve_cmla, INS_sve_cmla, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, MultiplyAddRotateComplexBySelectedScalar, -1, 5, true, {INS_invalid, INS_invalid, INS_sve_cmla, INS_sve_cmla, INS_sve_cmla, INS_sve_cmla, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, MultiplyAddWideningLower, -1, 4, true, {INS_invalid, INS_invalid, INS_sve_smlalb, INS_sve_umlalb, INS_sve_smlalb, INS_sve_umlalb, INS_sve_smlalb, INS_sve_umlalb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, MultiplyAddWideningUpper, -1, 4, true, {INS_invalid, INS_invalid, INS_sve_smlalt, INS_sve_umlalt, INS_sve_smlalt, INS_sve_umlalt, INS_sve_smlalt, INS_sve_umlalt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, MultiplyBySelectedScalar, -1, 3, true, {INS_invalid, INS_invalid, INS_sve_mul, INS_sve_mul, INS_sve_mul, INS_sve_mul, INS_sve_mul, INS_sve_mul, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, MultiplySubtractBySelectedScalar, -1, 4, true, {INS_invalid, INS_invalid, INS_sve_mls, INS_sve_mls, INS_sve_mls, INS_sve_mls, INS_sve_mls, INS_sve_mls, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, MultiplySubtractWideningLower, -1, 4, true, {INS_invalid, INS_invalid, INS_sve_smlslb, INS_sve_umlslb, INS_sve_smlslb, INS_sve_umlslb, INS_sve_smlslb, INS_sve_umlslb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, MultiplySubtractWideningUpper, -1, 4, true, {INS_invalid, INS_invalid, INS_sve_smlslt, INS_sve_umlslt, INS_sve_smlslt, INS_sve_umlslt, INS_sve_smlslt, INS_sve_umlslt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, MultiplyWideningLower, -1, 3, true, {INS_invalid, INS_invalid, INS_sve_smullb, INS_sve_umullb, INS_sve_smullb, INS_sve_umullb, INS_sve_smullb, INS_sve_umullb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, MultiplyWideningUpper, -1, 3, true, {INS_invalid, INS_invalid, INS_sve_smullt, INS_sve_umullt, INS_sve_smullt, INS_sve_umullt, INS_sve_smullt, INS_sve_umullt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, NoMatch, -1, 3, true, {INS_sve_nmatch, INS_sve_nmatch, INS_sve_nmatch, INS_sve_nmatch, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, PolynomialMultiply, -1, 2, false, {INS_invalid, INS_sve_pmul, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, PolynomialMultiplyWideningLower, -1, 2, true, {INS_invalid, INS_sve_pmullb, INS_invalid, INS_sve_pmullb, INS_invalid, INS_sve_pmullb, INS_invalid, INS_sve_pmullb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, PolynomialMultiplyWideningUpper, -1, 2, true, {INS_invalid, INS_sve_pmullt, INS_invalid, INS_sve_pmullt, INS_invalid, INS_sve_pmullt, INS_invalid, INS_sve_pmullt, INS_invalid, INS_invalid}, HW_Category_SIMD, 
HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ReciprocalEstimate, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_urecpe, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, ReciprocalSqrtEstimate, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ursqrte, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, RoundingAddHighNarowingLower, -1, 2, true, {INS_sve_raddhnb, INS_sve_raddhnb, INS_sve_raddhnb, INS_sve_raddhnb, INS_sve_raddhnb, INS_sve_raddhnb, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, RoundingAddHighNarowingUpper, -1, 3, true, {INS_sve_raddhnt, INS_sve_raddhnt, INS_sve_raddhnt, INS_sve_raddhnt, INS_sve_raddhnt, INS_sve_raddhnt, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, RoundingHalvingAdd, -1, 2, true, {INS_sve_srhadd, INS_sve_urhadd, INS_sve_srhadd, INS_sve_urhadd, INS_sve_srhadd, INS_sve_urhadd, INS_sve_srhadd, INS_sve_urhadd, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, RoundingSubtractHighNarowingLower, -1, 2, true, {INS_sve_rsubhnb, INS_sve_rsubhnb, INS_sve_rsubhnb, INS_sve_rsubhnb, INS_sve_rsubhnb, INS_sve_rsubhnb, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, RoundingSubtractHighNarowingUpper, -1, 3, true, {INS_sve_rsubhnt, INS_sve_rsubhnt, INS_sve_rsubhnt, INS_sve_rsubhnt, INS_sve_rsubhnt, INS_sve_rsubhnt, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SaturatingAbs, -1, 1, true, {INS_sve_sqabs, INS_invalid, INS_sve_sqabs, INS_invalid, INS_sve_sqabs, INS_invalid, INS_sve_sqabs, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, SaturatingComplexAddRotate, -1, 3, true, {INS_sve_sqcadd, INS_invalid, INS_sve_sqcadd, INS_invalid, INS_sve_sqcadd, INS_invalid, INS_sve_sqcadd, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, SaturatingDoublingMultiplyAddWideningLower, -1, 4, true, {INS_invalid, INS_invalid, INS_sve_sqdmlalb, INS_invalid, INS_sve_sqdmlalb, INS_invalid, INS_sve_sqdmlalb, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, SaturatingDoublingMultiplyAddWideningLowerUpper, -1, 3, true, {INS_invalid, INS_invalid, INS_sve_sqdmlalbt, INS_invalid, INS_sve_sqdmlalbt, INS_invalid, INS_sve_sqdmlalbt, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SaturatingDoublingMultiplyAddWideningUpper, -1, 4, true, {INS_invalid, INS_invalid, INS_sve_sqdmlalt, INS_invalid, INS_sve_sqdmlalt, INS_invalid, INS_sve_sqdmlalt, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, SaturatingDoublingMultiplyHigh, -1, 3, true, {INS_sve_sqdmulh, INS_invalid, INS_sve_sqdmulh, INS_invalid, INS_sve_sqdmulh, INS_invalid, INS_sve_sqdmulh, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, SaturatingDoublingMultiplySubtractWideningLower, -1, 4, true, 
{INS_invalid, INS_invalid, INS_sve_sqdmlslb, INS_invalid, INS_sve_sqdmlslb, INS_invalid, INS_sve_sqdmlslb, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, SaturatingDoublingMultiplySubtractWideningLowerUpper, -1, 3, true, {INS_invalid, INS_invalid, INS_sve_sqdmlslbt, INS_invalid, INS_sve_sqdmlslbt, INS_invalid, INS_sve_sqdmlslbt, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SaturatingDoublingMultiplySubtractWideningUpper, -1, 4, true, {INS_invalid, INS_invalid, INS_sve_sqdmlslt, INS_invalid, INS_sve_sqdmlslt, INS_invalid, INS_sve_sqdmlslt, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, SaturatingDoublingMultiplyWideningLower, -1, 3, true, {INS_invalid, INS_invalid, INS_sve_sqdmullb, INS_invalid, INS_sve_sqdmullb, INS_invalid, INS_sve_sqdmullb, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, SaturatingDoublingMultiplyWideningUpper, -1, 3, true, {INS_invalid, INS_invalid, INS_sve_sqdmullt, INS_invalid, INS_sve_sqdmullt, INS_invalid, INS_sve_sqdmullt, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, SaturatingExtractNarrowingLower, -1, 1, true, {INS_sve_sqxtnb, INS_sve_uqxtnb, INS_sve_sqxtnb, INS_sve_uqxtnb, INS_sve_sqxtnb, INS_sve_uqxtnb, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SaturatingExtractNarrowingUpper, -1, 2, true, {INS_sve_sqxtnt, INS_sve_uqxtnt, INS_sve_sqxtnt, INS_sve_uqxtnt, INS_sve_sqxtnt, INS_sve_uqxtnt, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SaturatingExtractUnsignedNarrowingLower, -1, 1, true, {INS_invalid, INS_sve_sqxtunb, INS_invalid, INS_sve_sqxtunb, INS_invalid, INS_sve_sqxtunb, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SaturatingExtractUnsignedNarrowingUpper, -1, 2, true, {INS_invalid, INS_sve_sqxtunt, INS_invalid, INS_sve_sqxtunt, INS_invalid, INS_sve_sqxtunt, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SaturatingNegate, -1, 1, true, {INS_sve_sqneg, INS_invalid, INS_sve_sqneg, INS_invalid, INS_sve_sqneg, INS_invalid, INS_sve_sqneg, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, SaturatingRoundingDoublingComplexMultiplyAddHighRotate, -1, 5, true, {INS_sve_sqrdcmlah, INS_invalid, INS_sve_sqrdcmlah, INS_invalid, INS_sve_sqrdcmlah, INS_invalid, INS_sve_sqrdcmlah, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, SaturatingRoundingDoublingMultiplyAddHigh, -1, 4, true, {INS_sve_sqrdmlah, INS_invalid, INS_sve_sqrdmlah, INS_invalid, INS_sve_sqrdmlah, INS_invalid, INS_sve_sqrdmlah, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, SaturatingRoundingDoublingMultiplyHigh, -1, 3, true, {INS_sve_sqrdmulh, INS_invalid, INS_sve_sqrdmulh, INS_invalid, INS_sve_sqrdmulh, INS_invalid, INS_sve_sqrdmulh, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, 
HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, SaturatingRoundingDoublingMultiplySubtractHigh, -1, 4, true, {INS_sve_sqrdmlsh, INS_invalid, INS_sve_sqrdmlsh, INS_invalid, INS_sve_sqrdmlsh, INS_invalid, INS_sve_sqrdmlsh, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, Scatter16BitNarrowing, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_stnt1h, INS_sve_stnt1h, INS_sve_stnt1h, INS_sve_stnt1h, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, Scatter16BitWithByteOffsetsNarrowing, -1, 4, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_stnt1h, INS_sve_stnt1h, INS_sve_stnt1h, INS_sve_stnt1h, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, Scatter32BitNarrowing, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_stnt1w, INS_sve_stnt1w, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, Scatter32BitWithByteOffsetsNarrowing, -1, 4, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_stnt1w, INS_sve_stnt1w, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, Scatter8BitNarrowing, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_stnt1b, INS_sve_stnt1b, INS_sve_stnt1b, INS_sve_stnt1b, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, Scatter8BitWithByteOffsetsNarrowing, -1, 4, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_stnt1b, INS_sve_stnt1b, INS_sve_stnt1b, INS_sve_stnt1b, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, ScatterNonTemporal, -1, 4, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_stnt1w, INS_sve_stnt1w, INS_sve_stnt1d, INS_sve_stnt1d, INS_sve_stnt1w, INS_sve_stnt1d}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(Sve2, ShiftArithmeticRounded, -1, 2, true, {INS_sve_srshl, INS_invalid, INS_sve_srshl, INS_invalid, INS_sve_srshl, INS_invalid, INS_sve_srshl, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, ShiftArithmeticRoundedSaturate, -1, 2, true, {INS_sve_sqrshl, INS_invalid, INS_sve_sqrshl, INS_invalid, INS_sve_sqrshl, INS_invalid, INS_sve_sqrshl, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, ShiftArithmeticSaturate, -1, 2, true, {INS_sve_sqshl, INS_invalid, INS_sve_sqshl, INS_invalid, INS_sve_sqshl, INS_invalid, INS_sve_sqshl, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, ShiftLeftAndInsert, -1, 3, true, {INS_sve_sli, INS_sve_sli, INS_sve_sli, INS_sve_sli, INS_sve_sli, INS_sve_sli, INS_sve_sli, INS_sve_sli, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, ShiftLeftLogicalSaturate, -1, 2, true, {INS_invalid, INS_sve_uqshl, INS_invalid, INS_sve_uqshl, INS_invalid, INS_sve_uqshl, INS_invalid, INS_sve_uqshl, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftLeftLogicalSaturateUnsigned, -1, 2, true, {INS_invalid, INS_sve_sqshlu, INS_invalid, INS_sve_sqshlu, 
INS_invalid, INS_sve_sqshlu, INS_invalid, INS_sve_sqshlu, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftLeftLogicalWideningEven, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_sshllb, INS_sve_ushllb, INS_sve_sshllb, INS_sve_ushllb, INS_sve_sshllb, INS_sve_ushllb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftLeftLogicalWideningOdd, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_sshllt, INS_sve_ushllt, INS_sve_sshllt, INS_sve_ushllt, INS_sve_sshllt, INS_sve_ushllt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftLogicalRounded, -1, 2, true, {INS_invalid, INS_sve_urshl, INS_invalid, INS_sve_urshl, INS_invalid, INS_sve_urshl, INS_invalid, INS_sve_urshl, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftLogicalRoundedSaturate, -1, 2, true, {INS_invalid, INS_sve_uqrshl, INS_invalid, INS_sve_uqrshl, INS_invalid, INS_sve_uqrshl, INS_invalid, INS_sve_uqrshl, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftRightAndInsert, -1, 3, true, {INS_sve_sri, INS_sve_sri, INS_sve_sri, INS_sve_sri, INS_sve_sri, INS_sve_sri, INS_sve_sri, INS_sve_sri, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, ShiftRightArithmeticAdd, -1, 3, true, {INS_sve_ssra, INS_invalid, INS_sve_ssra, INS_invalid, INS_sve_ssra, INS_invalid, INS_sve_ssra, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, ShiftRightArithmeticNarrowingSaturateEven, -1, 2, true, {INS_sve_sqshrnb, INS_sve_uqshrnb, INS_sve_sqshrnb, INS_sve_uqshrnb, INS_sve_sqshrnb, INS_sve_uqshrnb, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftRightArithmeticNarrowingSaturateOdd, -1, 3, true, {INS_sve_sqshrnt, INS_sve_uqshrnt, INS_sve_sqshrnt, INS_sve_uqshrnt, INS_sve_sqshrnt, INS_sve_uqshrnt, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftRightArithmeticNarrowingSaturateUnsignedEven, -1, 2, true, {INS_invalid, INS_sve_sqshrunb, INS_invalid, INS_sve_sqshrunb, INS_invalid, INS_sve_sqshrunb, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftRightArithmeticNarrowingSaturateUnsignedOdd, -1, 3, true, {INS_invalid, INS_sve_sqshrunt, INS_invalid, INS_sve_sqshrunt, INS_invalid, INS_sve_sqshrunt, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftRightArithmeticRounded, -1, 2, true, {INS_sve_srshr, INS_invalid, INS_sve_srshr, INS_invalid, INS_sve_srshr, INS_invalid, INS_sve_srshr, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, ShiftRightArithmeticRoundedAdd, -1, 3, true, {INS_sve_srsra, INS_invalid, INS_sve_srsra, INS_invalid, INS_sve_srsra, INS_invalid, INS_sve_srsra, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, ShiftRightArithmeticRoundedNarrowingSaturateEven, -1, 2, true, {INS_sve_sqrshrnb, INS_invalid, INS_sve_sqrshrnb, INS_invalid, INS_sve_sqrshrnb, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) 
+HARDWARE_INTRINSIC(Sve2, ShiftRightArithmeticRoundedNarrowingSaturateOdd, -1, 3, true, {INS_sve_sqrshrnt, INS_invalid, INS_sve_sqrshrnt, INS_invalid, INS_sve_sqrshrnt, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven, -1, 2, true, {INS_invalid, INS_sve_sqrshrunb, INS_invalid, INS_sve_sqrshrunb, INS_invalid, INS_sve_sqrshrunb, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd, -1, 3, true, {INS_invalid, INS_sve_sqrshrunt, INS_invalid, INS_sve_sqrshrunt, INS_invalid, INS_sve_sqrshrunt, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftRightLogicalAdd, -1, 3, true, {INS_invalid, INS_sve_usra, INS_invalid, INS_sve_usra, INS_invalid, INS_sve_usra, INS_invalid, INS_sve_usra, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, ShiftRightLogicalNarrowingEven, -1, 2, true, {INS_sve_shrnb, INS_sve_shrnb, INS_sve_shrnb, INS_sve_shrnb, INS_sve_shrnb, INS_sve_shrnb, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftRightLogicalNarrowingOdd, -1, 3, true, {INS_sve_shrnt, INS_sve_shrnt, INS_sve_shrnt, INS_sve_shrnt, INS_sve_shrnt, INS_sve_shrnt, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftRightLogicalRounded, -1, 2, true, {INS_invalid, INS_sve_urshr, INS_invalid, INS_sve_urshr, INS_invalid, INS_sve_urshr, INS_invalid, INS_sve_urshr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, ShiftRightLogicalRoundedAdd, -1, 3, true, {INS_invalid, INS_sve_ursra, INS_invalid, INS_sve_ursra, INS_invalid, INS_sve_ursra, INS_invalid, INS_sve_ursra, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, ShiftRightLogicalRoundedNarrowingEven, -1, 2, true, {INS_sve_rshrnb, INS_sve_rshrnb, INS_sve_rshrnb, INS_sve_rshrnb, INS_sve_rshrnb, INS_sve_rshrnb, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftRightLogicalRoundedNarrowingOdd, -1, 3, true, {INS_sve_rshrnt, INS_sve_rshrnt, INS_sve_rshrnt, INS_sve_rshrnt, INS_sve_rshrnt, INS_sve_rshrnt, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftRightLogicalRoundedNarrowingSaturateEven, -1, 2, true, {INS_invalid, INS_sve_uqrshrnb, INS_invalid, INS_sve_uqrshrnb, INS_invalid, INS_sve_uqrshrnb, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, ShiftRightLogicalRoundedNarrowingSaturateOdd, -1, 3, true, {INS_invalid, INS_sve_uqrshrnt, INS_invalid, INS_sve_uqrshrnt, INS_invalid, INS_sve_uqrshrnt, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SubtractHighNarowingLower, -1, 2, true, {INS_sve_subhnb, INS_sve_subhnb, INS_sve_subhnb, INS_sve_subhnb, INS_sve_subhnb, INS_sve_subhnb, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SubtractHighNarowingUpper, -1, 3, true, 
{INS_sve_subhnt, INS_sve_subhnt, INS_sve_subhnt, INS_sve_subhnt, INS_sve_subhnt, INS_sve_subhnt, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SubtractSaturate, -1, 2, true, {INS_sve_sqsub, INS_sve_uqsub, INS_sve_sqsub, INS_sve_uqsub, INS_sve_sqsub, INS_sve_uqsub, INS_sve_sqsub, INS_sve_uqsub, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, SubtractSaturateReversed, -1, 2, true, {INS_sve_sqsubr, INS_sve_uqsubr, INS_sve_sqsubr, INS_sve_uqsubr, INS_sve_sqsubr, INS_sve_uqsubr, INS_sve_sqsubr, INS_sve_uqsubr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, SubtractWideLower, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_ssubwb, INS_sve_usubwb, INS_sve_ssubwb, INS_sve_usubwb, INS_sve_ssubwb, INS_sve_usubwb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SubtractWideUpper, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_ssubwt, INS_sve_usubwt, INS_sve_ssubwt, INS_sve_usubwt, INS_sve_ssubwt, INS_sve_usubwt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SubtractWideningLower, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_ssublb, INS_sve_usublb, INS_sve_ssublb, INS_sve_usublb, INS_sve_ssublb, INS_sve_usublb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SubtractWideningLowerUpper, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_ssublbt, INS_invalid, INS_sve_ssublbt, INS_invalid, INS_sve_ssublbt, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SubtractWideningUpper, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_ssublt, INS_sve_usublt, INS_sve_ssublt, INS_sve_usublt, INS_sve_ssublt, INS_sve_usublt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SubtractWideningUpperLower, -1, 2, true, {INS_invalid, INS_invalid, INS_sve_ssubltb, INS_invalid, INS_sve_ssubltb, INS_invalid, INS_sve_ssubltb, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, SubtractWithBorrowWideningLower, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_sbclb, INS_invalid, INS_sve_sbclb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, SubtractWithBorrowWideningUpper, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_sbclt, INS_invalid, INS_sve_sbclt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, UpConvertWideningUpper, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvtlt}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, VectorTableLookup, -1, 2, true, {INS_sve_tbl, INS_sve_tbl, INS_sve_tbl, INS_sve_tbl, INS_sve_tbl, INS_sve_tbl, INS_sve_tbl, INS_sve_tbl, INS_sve_tbl, INS_sve_tbl}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, VectorTableLookupExtension, -1, 3, true, {INS_sve_tbx, INS_sve_tbx, INS_sve_tbx, INS_sve_tbx, INS_sve_tbx, INS_sve_tbx, INS_sve_tbx, INS_sve_tbx, INS_sve_tbx, INS_sve_tbx}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(Sve2, Xor, -1, 3, true, {INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, 
INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sve2, XorRotateRight, -1, 3, true, {INS_sve_xar, INS_sve_xar, INS_sve_xar, INS_sve_xar, INS_sve_xar, INS_sve_xar, INS_sve_xar, INS_sve_xar, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) + + +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// ISA Function name SIMD size NumArg EncodesExtraTypeArg Instructions Category Flags +// {TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT, TYP_UINT, TYP_LONG, TYP_ULONG, TYP_FLOAT, TYP_DOUBLE} +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// SveBf16 +HARDWARE_INTRINSIC(SveBf16, Bfloat16DotProduct, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_bfdot, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveBf16, Bfloat16MatrixMultiplyAccumulate, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_bfmmla, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveBf16, Bfloat16MultiplyAddWideningToSinglePrecisionLower, -1, 4, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_bfmlalb, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveBf16, Bfloat16MultiplyAddWideningToSinglePrecisionUpper, -1, 4, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_bfmlalt, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveBf16, ConcatenateEvenInt128FromTwoInputs, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, ConcatenateOddInt128FromTwoInputs, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, ConditionalExtractAfterLastActiveElement, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveBf16, ConditionalExtractAfterLastActiveElementAndReplicate, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) 
+HARDWARE_INTRINSIC(SveBf16, ConditionalExtractLastActiveElement, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveBf16, ConditionalExtractLastActiveElementAndReplicate, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, ConditionalSelect, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, ConvertToBFloat16, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveBf16, CreateFalseMaskBFloat16, -1, 0, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, CreateTrueMaskBFloat16, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, CreateWhileReadAfterWriteMask, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, CreateWhileWriteAfterReadMask, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, DotProductBySelectedScalar, -1, 4, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_bfdot, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveBf16, DownConvertNarrowingUpper, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveBf16, DuplicateSelectedScalarToVector, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, ExtractAfterLastScalar, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, ExtractAfterLastVector, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, ExtractLastScalar, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, ExtractLastVector, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, 
INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, ExtractVector, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, GetActiveElementCount, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, InsertIntoShiftedVector, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, InterleaveEvenInt128FromTwoInputs, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, InterleaveInt128FromHighHalvesOfTwoInputs, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, InterleaveInt128FromLowHalvesOfTwoInputs, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, InterleaveOddInt128FromTwoInputs, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, LoadVector, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, LoadVector128AndReplicateToVector, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, LoadVector256AndReplicateToVector, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, LoadVectorFirstFaulting, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, LoadVectorNonFaulting, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, LoadVectorNonTemporal, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, LoadVectorx2, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_MultiReg) +HARDWARE_INTRINSIC(SveBf16, LoadVectorx3, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, 
HW_Category_SIMD, HW_Flag_MultiReg) +HARDWARE_INTRINSIC(SveBf16, LoadVectorx4, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_MultiReg) +HARDWARE_INTRINSIC(SveBf16, PopCount, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_sve_cnt, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveBf16, ReverseElement, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, Splice, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, Store, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveBf16, StoreNonTemporal, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, TransposeEven, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, TransposeOdd, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, UnzipEven, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, UnzipOdd, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, VectorTableLookup, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveBf16, VectorTableLookupExtension, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveBf16, ZipHigh, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBf16, ZipLow, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) + + +// 
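For readers following how these generated rows are consumed: each HARDWARE_INTRINSIC entry binds one managed method to up to ten instructions, one per element type in the {TYP_BYTE ... TYP_DOUBLE} legend, with INS_invalid marking element types the method does not support, and the -1 in the SIMD-size column reflecting that SVE vectors are scalable rather than fixed-width. A minimal C# sketch of what the Sve2 HalvingAdd row above implies at a call site, assuming the Vector<T> overloads this change exposes (the exact signatures shown are illustrative, not copied from the diff):

```csharp
using System;
using System.Numerics;
using System.Runtime.Intrinsics.Arm;

static class HalvingAddSketch
{
    // Per the HalvingAdd row, signed element types map to sve_shadd and
    // unsigned ones to sve_uhadd; the float/double slots are INS_invalid,
    // so only integer element types are valid for this method.
    static Vector<int> HalveSumSigned(Vector<int> left, Vector<int> right)
        => Sve2.IsSupported
            ? Sve2.HalvingAdd(left, right)   // expected to emit shadd
            : throw new PlatformNotSupportedException();

    static Vector<uint> HalveSumUnsigned(Vector<uint> left, Vector<uint> right)
        => Sve2.IsSupported
            ? Sve2.HalvingAdd(left, right)   // expected to emit uhadd
            : throw new PlatformNotSupportedException();
}
```

Note that the SveBf16 rows above (and most SveFp16 rows below) carry all-INS_invalid instruction slots, which is consistent with the ten type slots having no bfloat16 or half column; instruction selection for those element types would have to happen outside this table, e.g. via the HW_Flag_SpecialCodeGen paths some of those rows are tagged with.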
*************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// ISA Function name SIMD size NumArg EncodesExtraTypeArg Instructions Category Flags +// {TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT, TYP_UINT, TYP_LONG, TYP_ULONG, TYP_FLOAT, TYP_DOUBLE} +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// SveF32mm +HARDWARE_INTRINSIC(SveF32mm, MatrixMultiplyAccumulate, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmmla, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) + + +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// ISA Function name SIMD size NumArg EncodesExtraTypeArg Instructions Category Flags +// {TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT, TYP_UINT, TYP_LONG, TYP_ULONG, TYP_FLOAT, TYP_DOUBLE} +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// SveF64mm +HARDWARE_INTRINSIC(SveF64mm, ConcatenateEvenInt128FromTwoInputs, -1, 2, true, {INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1, INS_sve_uzp1}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveF64mm, ConcatenateOddInt128FromTwoInputs, -1, 2, true, {INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2, INS_sve_uzp2}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveF64mm, InterleaveEvenInt128FromTwoInputs, -1, 2, true, {INS_sve_trn1, INS_sve_trn1, INS_sve_trn1, INS_sve_trn1, INS_sve_trn1, INS_sve_trn1, INS_sve_trn1, INS_sve_trn1, INS_sve_trn1, INS_sve_trn1}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveF64mm, InterleaveInt128FromHighHalvesOfTwoInputs, -1, 2, true, {INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2, INS_sve_zip2}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveF64mm, InterleaveInt128FromLowHalvesOfTwoInputs, -1, 2, true, {INS_sve_zip1, INS_sve_zip1, INS_sve_zip1, INS_sve_zip1, INS_sve_zip1, 
INS_sve_zip1, INS_sve_zip1, INS_sve_zip1, INS_sve_zip1, INS_sve_zip1}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveF64mm, InterleaveOddInt128FromTwoInputs, -1, 2, true, {INS_sve_trn2, INS_sve_trn2, INS_sve_trn2, INS_sve_trn2, INS_sve_trn2, INS_sve_trn2, INS_sve_trn2, INS_sve_trn2, INS_sve_trn2, INS_sve_trn2}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveF64mm, LoadVector256AndReplicateToVector, -1, 2, true, {INS_sve_ld1rob, INS_sve_ld1rob, INS_sve_ld1roh, INS_sve_ld1roh, INS_sve_ld1row, INS_sve_ld1row, INS_sve_ld1rod, INS_sve_ld1rod, INS_sve_ld1row, INS_sve_ld1rod}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveF64mm, MatrixMultiplyAccumulate, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmmla}, HW_Category_SIMD, HW_Flag_NoFlag) + + +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// ISA Function name SIMD size NumArg EncodesExtraTypeArg Instructions Category Flags +// {TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT, TYP_UINT, TYP_LONG, TYP_ULONG, TYP_FLOAT, TYP_DOUBLE} +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// SveFp16 +HARDWARE_INTRINSIC(SveFp16, Abs, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, AbsoluteCompareGreaterThan, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, AbsoluteCompareGreaterThanOrEqual, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, AbsoluteCompareLessThan, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, AbsoluteCompareLessThanOrEqual, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, AbsoluteDifference, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, Add, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) 
+HARDWARE_INTRINSIC(SveFp16, AddAcross, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, AddPairwise, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, AddRotateComplex, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, AddSequentialAcross, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, CompareEqual, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, CompareGreaterThan, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, CompareGreaterThanOrEqual, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, CompareLessThan, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, CompareLessThanOrEqual, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, CompareNotEqualTo, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, CompareUnordered, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ConcatenateEvenInt128FromTwoInputs, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ConcatenateOddInt128FromTwoInputs, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ConditionalExtractAfterLastActiveElement, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveFp16, ConditionalExtractAfterLastActiveElementAndReplicate, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, 
ConditionalExtractLastActiveElement, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveFp16, ConditionalExtractLastActiveElementAndReplicate, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ConditionalSelect, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ConvertToDouble, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvt}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, ConvertToHalf, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveFp16, ConvertToInt16, -1, 1, false, {INS_invalid, INS_invalid, INS_sve_fcvtzs, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, ConvertToInt32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvtzs, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, ConvertToInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvtzs, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, ConvertToSingle, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvt, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, ConvertToUInt16, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvtzu, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, ConvertToUInt32, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvtzu, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, ConvertToUInt64, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvtzu, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, CreateFalseMaskHalf, -1, 0, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, CreateTrueMaskHalf, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, CreateWhileReadAfterWriteMask, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, 
INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, CreateWhileWriteAfterReadMask, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, Divide, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, DownConvertNarrowingUpper, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, DuplicateSelectedScalarToVector, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ExtractAfterLastScalar, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ExtractAfterLastVector, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ExtractLastScalar, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ExtractLastVector, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ExtractVector, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, FloatingPointExponentialAccelerator, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, FusedMultiplyAdd, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, FusedMultiplyAddBySelectedScalar, -1, 4, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, FusedMultiplyAddNegated, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, FusedMultiplySubtract, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, FusedMultiplySubtractBySelectedScalar, -1, 4, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, 
INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, FusedMultiplySubtractNegated, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, GetActiveElementCount, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, InsertIntoShiftedVector, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, InterleaveEvenInt128FromTwoInputs, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, InterleaveInt128FromHighHalvesOfTwoInputs, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, InterleaveInt128FromLowHalvesOfTwoInputs, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, InterleaveOddInt128FromTwoInputs, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, LoadVector, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, LoadVector128AndReplicateToVector, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, LoadVector256AndReplicateToVector, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, LoadVectorFirstFaulting, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, LoadVectorNonFaulting, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, LoadVectorNonTemporal, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, LoadVectorx2, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_MultiReg) +HARDWARE_INTRINSIC(SveFp16, LoadVectorx3, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, 
INS_invalid}, HW_Category_SIMD, HW_Flag_MultiReg) +HARDWARE_INTRINSIC(SveFp16, LoadVectorx4, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_MultiReg) +HARDWARE_INTRINSIC(SveFp16, Log2, -1, 1, false, {INS_invalid, INS_invalid, INS_sve_flogb, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, Max, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MaxAcross, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MaxNumber, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MaxNumberAcross, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MaxNumberPairwise, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MaxPairwise, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, Min, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MinAcross, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MinNumber, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MinNumberAcross, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MinNumberPairwise, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MinPairwise, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, Multiply, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MultiplyAddRotateComplex, -1, 4, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, 
INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MultiplyAddRotateComplexBySelectedScalar, -1, 5, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MultiplyAddWideningLower, -1, 4, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmlalb, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveFp16, MultiplyAddWideningUpper, -1, 4, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmlalt, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveFp16, MultiplyBySelectedScalar, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MultiplyExtended, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, MultiplySubtractWideningLower, -1, 4, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmlslb, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveFp16, MultiplySubtractWideningUpper, -1, 4, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fmlslt, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveFp16, Negate, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, PopCount, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_sve_cnt, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, ReciprocalEstimate, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ReciprocalExponent, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ReciprocalSqrtEstimate, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ReciprocalSqrtStep, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ReciprocalStep, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ReverseElement, -1, 1, false, 
{INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, RoundAwayFromZero, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, RoundToNearest, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, RoundToNegativeInfinity, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, RoundToPositiveInfinity, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, RoundToZero, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, Scale, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, Splice, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, Sqrt, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, Store, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveFp16, StoreNonTemporal, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, Subtract, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, TransposeEven, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, TransposeOdd, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, TrigonometricMultiplyAddCoefficient, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, TrigonometricSelectCoefficient, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, 
HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, TrigonometricStartingValue, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, UnzipEven, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, UnzipOdd, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, UpConvertWideningUpper, -1, 1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_fcvtlt, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, VectorTableLookup, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveFp16, VectorTableLookupExtension, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveFp16, ZipHigh, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveFp16, ZipLow, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) + + +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// ISA Function name SIMD size NumArg EncodesExtraTypeArg Instructions Category Flags +// {TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT, TYP_UINT, TYP_LONG, TYP_ULONG, TYP_FLOAT, TYP_DOUBLE} +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// SveI8mm +HARDWARE_INTRINSIC(SveI8mm, DotProductSignedUnsigned, -1, 4, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_usdot/INS_sve_sudot,INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveI8mm, DotProductUnsignedSigned, -1, 4, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_usdot, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, 
HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen) +HARDWARE_INTRINSIC(SveI8mm, MatrixMultiplyAccumulate, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_smmla, INS_sve_ummla, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) +HARDWARE_INTRINSIC(SveI8mm, MatrixMultiplyAccumulateUnsignedSigned, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_usmmla, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_BaseTypeFromFirstArg) + + +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// ISA Function name SIMD size NumArg EncodesExtraTypeArg Instructions Category Flags +// {TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT, TYP_UINT, TYP_LONG, TYP_ULONG, TYP_FLOAT, TYP_DOUBLE} +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// Sha3 +HARDWARE_INTRINSIC(Sha3, BitwiseClearXor, -1, 3, true, {INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_sve_bcax, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sha3, BitwiseRotateLeftBy1AndXor, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_rax1, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sha3, Xor, -1, 3, true, {INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_sve_eor3, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sha3, XorRotateRight, -1, 3, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_xar, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) + + +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// ISA Function name SIMD size NumArg EncodesExtraTypeArg Instructions Category Flags +// {TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT, TYP_UINT, TYP_LONG, TYP_ULONG, TYP_FLOAT, TYP_DOUBLE} +// 
*************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// Sm4 +HARDWARE_INTRINSIC(Sm4, Sm4EncryptionAndDecryption, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_sm4e, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(Sm4, Sm4KeyUpdates, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_sm4ekey, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) + + +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// ISA Function name SIMD size NumArg EncodesExtraTypeArg Instructions Category Flags +// {TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT, TYP_UINT, TYP_LONG, TYP_ULONG, TYP_FLOAT, TYP_DOUBLE} +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// SveAes +HARDWARE_INTRINSIC(SveAes, AesInverseMixColumns, -1, 1, false, {INS_invalid, INS_sve_aesimc, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveAes, AesMixColumns, -1, 1, false, {INS_invalid, INS_sve_aesmc, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveAes, AesSingleRoundDecryption, -1, 2, false, {INS_invalid, INS_sve_aesd, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveAes, AesSingleRoundEncryption, -1, 2, false, {INS_invalid, INS_sve_aese, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveAes, PolynomialMultiplyWideningLower, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_pmullb, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveAes, PolynomialMultiplyWideningUpper, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_pmullt, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) + + +// 
*************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// ISA Function name SIMD size NumArg EncodesExtraTypeArg Instructions Category Flags +// {TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT, TYP_UINT, TYP_LONG, TYP_ULONG, TYP_FLOAT, TYP_DOUBLE} +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// SveBitperm +HARDWARE_INTRINSIC(SveBitperm, GatherLowerBitsFromPositionsSelectedByBitmask, -1, 2, true, {INS_invalid, INS_sve_bext, INS_invalid, INS_sve_bext, INS_invalid, INS_sve_bext, INS_invalid, INS_sve_bext, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBitperm, GroupBitsToRightOrLeftAsSelectedByBitmask, -1, 2, true, {INS_invalid, INS_sve_bgrp, INS_invalid, INS_sve_bgrp, INS_invalid, INS_sve_bgrp, INS_invalid, INS_sve_bgrp, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveBitperm, ScatterLowerBitsIntoPositionsSelectedByBitmask, -1, 2, true, {INS_invalid, INS_sve_bdep, INS_invalid, INS_sve_bdep, INS_invalid, INS_sve_bdep, INS_invalid, INS_sve_bdep, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) + + +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// ISA Function name SIMD size NumArg EncodesExtraTypeArg Instructions Category Flags +// {TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT, TYP_UINT, TYP_LONG, TYP_ULONG, TYP_FLOAT, TYP_DOUBLE} +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// SveSha3 +HARDWARE_INTRINSIC(SveSha3, BitwiseRotateLeftBy1AndXor, -1, 2, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_rax1, INS_sve_rax1, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) + + +// 
*************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// ISA Function name SIMD size NumArg EncodesExtraTypeArg Instructions Category Flags +// {TYP_BYTE, TYP_UBYTE, TYP_SHORT, TYP_USHORT, TYP_INT, TYP_UINT, TYP_LONG, TYP_ULONG, TYP_FLOAT, TYP_DOUBLE} +// *************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************************** +// SveSm4 +HARDWARE_INTRINSIC(SveSm4, Sm4EncryptionAndDecryption, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_sm4e, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) +HARDWARE_INTRINSIC(SveSm4, Sm4KeyUpdates, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_sm4ekey, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_NoFlag) + + +#endif // FEATURE_HW_INTRINSIC + +#undef HARDWARE_INTRINSIC + +// clang-format on + diff --git a/sve_api/out_markdownlist.md b/sve_api/out_markdownlist.md new file mode 100644 index 0000000000000..dc515a02b27f0 --- /dev/null +++ b/sve_api/out_markdownlist.md @@ -0,0 +1,734 @@ + +### [Sve stores](https://github.com/dotnet/runtime/issues/94011) +- [ ] Store +- [ ] StoreNarrowing +- [ ] StoreNonTemporal + + +### [Sve scatterstores](https://github.com/dotnet/runtime/issues/94014) +- [ ] Scatter +- [ ] Scatter16BitNarrowing +- [ ] Scatter16BitWithByteOffsetsNarrowing +- [ ] Scatter32BitNarrowing +- [ ] Scatter32BitWithByteOffsetsNarrowing +- [ ] Scatter8BitNarrowing +- [ ] Scatter8BitWithByteOffsetsNarrowing + + +### [Sve maths](https://github.com/dotnet/runtime/issues/94009) +- [ ] Abs +- [ ] AbsoluteDifference +- [ ] Add +- [ ] AddAcross +- [ ] AddSaturate +- [ ] Divide +- [ ] DotProduct +- [ ] DotProductBySelectedScalar +- [ ] FusedMultiplyAdd +- [ ] FusedMultiplyAddBySelectedScalar +- [ ] FusedMultiplyAddNegated +- [ ] FusedMultiplySubtract +- [ ] FusedMultiplySubtractBySelectedScalar +- [ ] FusedMultiplySubtractNegated +- [ ] Max +- [ ] MaxAcross +- [ ] MaxNumber +- [ ] MaxNumberAcross +- [ ] Min +- [ ] MinAcross +- [ ] MinNumber +- [ ] MinNumberAcross +- [ ] Multiply +- [ ] MultiplyAdd +- [ ] MultiplyBySelectedScalar +- [ ] MultiplyExtended +- [ ] MultiplySubtract +- [ ] Negate +- [ ] SignExtend16 +- [ ] SignExtend32 +- [ ] SignExtend8 +- [ ] SignExtendWideningLower +- [ ] SignExtendWideningUpper +- [ ] Subtract +- [ ] SubtractSaturate +- [ ] ZeroExtend16 +- [ ] ZeroExtend32 +- [ ] ZeroExtend8 +- [ ] ZeroExtendWideningLower +- [ ] ZeroExtendWideningUpper + + +### [Sve mask](https://github.com/dotnet/runtime/issues/93964) +- [ ] AbsoluteCompareGreaterThan +- [ ] AbsoluteCompareGreaterThanOrEqual +- [ ] AbsoluteCompareLessThan +- [ ] AbsoluteCompareLessThanOrEqual +- [ ] Compact +- [ ] CompareEqual +- [ ] CompareGreaterThan +- [ ] CompareGreaterThanOrEqual 
+- [ ] CompareLessThan +- [ ] CompareLessThanOrEqual +- [ ] CompareNotEqualTo +- [ ] CompareUnordered +- [ ] ConditionalExtractAfterLastActiveElement +- [ ] ConditionalExtractAfterLastActiveElementAndReplicate +- [ ] ConditionalExtractLastActiveElement +- [ ] ConditionalExtractLastActiveElementAndReplicate +- [ ] ConditionalSelect +- [ ] CreateBreakAfterMask +- [ ] CreateBreakAfterPropagateMask +- [ ] CreateBreakBeforeMask +- [ ] CreateBreakBeforePropagateMask +- [ ] CreateBreakPropagateMask +- [ ] CreateFalseMaskByte +- [ ] CreateFalseMaskDouble +- [ ] CreateFalseMaskInt16 +- [ ] CreateFalseMaskInt32 +- [ ] CreateFalseMaskInt64 +- [ ] CreateFalseMaskSByte +- [ ] CreateFalseMaskSingle +- [ ] CreateFalseMaskUInt16 +- [ ] CreateFalseMaskUInt32 +- [ ] CreateFalseMaskUInt64 +- [ ] CreateMaskForFirstActiveElement +- [ ] CreateMaskForNextActiveElement +- [ ] CreateTrueMaskByte +- [ ] CreateTrueMaskDouble +- [ ] CreateTrueMaskInt16 +- [ ] CreateTrueMaskInt32 +- [ ] CreateTrueMaskInt64 +- [ ] CreateTrueMaskSByte +- [ ] CreateTrueMaskSingle +- [ ] CreateTrueMaskUInt16 +- [ ] CreateTrueMaskUInt32 +- [ ] CreateTrueMaskUInt64 +- [ ] CreateWhileLessThanMask16Bit +- [ ] CreateWhileLessThanMask32Bit +- [ ] CreateWhileLessThanMask64Bit +- [ ] CreateWhileLessThanMask8Bit +- [ ] CreateWhileLessThanOrEqualMask16Bit +- [ ] CreateWhileLessThanOrEqualMask32Bit +- [ ] CreateWhileLessThanOrEqualMask64Bit +- [ ] CreateWhileLessThanOrEqualMask8Bit +- [ ] ExtractAfterLastScalar +- [ ] ExtractAfterLastVector +- [ ] ExtractLastScalar +- [ ] ExtractLastVector +- [ ] ExtractVector +- [ ] TestAnyTrue +- [ ] TestFirstTrue +- [ ] TestLastTrue + + +### [Sve loads](https://github.com/dotnet/runtime/issues/94006) +- [ ] Compute16BitAddresses +- [ ] Compute32BitAddresses +- [ ] Compute64BitAddresses +- [ ] Compute8BitAddresses +- [ ] LoadVector +- [ ] LoadVector128AndReplicateToVector +- [ ] LoadVectorByteNonFaultingZeroExtendToInt16 +- [ ] LoadVectorByteNonFaultingZeroExtendToInt32 +- [ ] LoadVectorByteNonFaultingZeroExtendToInt64 +- [ ] LoadVectorByteNonFaultingZeroExtendToUInt16 +- [ ] LoadVectorByteNonFaultingZeroExtendToUInt32 +- [ ] LoadVectorByteNonFaultingZeroExtendToUInt64 +- [ ] LoadVectorByteZeroExtendToInt16 +- [ ] LoadVectorByteZeroExtendToInt32 +- [ ] LoadVectorByteZeroExtendToInt64 +- [ ] LoadVectorByteZeroExtendToUInt16 +- [ ] LoadVectorByteZeroExtendToUInt32 +- [ ] LoadVectorByteZeroExtendToUInt64 +- [ ] LoadVectorInt16NonFaultingSignExtendToInt32 +- [ ] LoadVectorInt16NonFaultingSignExtendToInt64 +- [ ] LoadVectorInt16NonFaultingSignExtendToUInt32 +- [ ] LoadVectorInt16NonFaultingSignExtendToUInt64 +- [ ] LoadVectorInt16SignExtendToInt32 +- [ ] LoadVectorInt16SignExtendToInt64 +- [ ] LoadVectorInt16SignExtendToUInt32 +- [ ] LoadVectorInt16SignExtendToUInt64 +- [ ] LoadVectorInt32NonFaultingSignExtendToInt64 +- [ ] LoadVectorInt32NonFaultingSignExtendToUInt64 +- [ ] LoadVectorInt32SignExtendToInt64 +- [ ] LoadVectorInt32SignExtendToUInt64 +- [ ] LoadVectorNonFaulting +- [ ] LoadVectorNonTemporal +- [ ] LoadVectorSByteNonFaultingSignExtendToInt16 +- [ ] LoadVectorSByteNonFaultingSignExtendToInt32 +- [ ] LoadVectorSByteNonFaultingSignExtendToInt64 +- [ ] LoadVectorSByteNonFaultingSignExtendToUInt16 +- [ ] LoadVectorSByteNonFaultingSignExtendToUInt32 +- [ ] LoadVectorSByteNonFaultingSignExtendToUInt64 +- [ ] LoadVectorSByteSignExtendToInt16 +- [ ] LoadVectorSByteSignExtendToInt32 +- [ ] LoadVectorSByteSignExtendToInt64 +- [ ] LoadVectorSByteSignExtendToUInt16 +- [ ] LoadVectorSByteSignExtendToUInt32 +- [ 
] LoadVectorSByteSignExtendToUInt64 +- [ ] LoadVectorUInt16NonFaultingZeroExtendToInt32 +- [ ] LoadVectorUInt16NonFaultingZeroExtendToInt64 +- [ ] LoadVectorUInt16NonFaultingZeroExtendToUInt32 +- [ ] LoadVectorUInt16NonFaultingZeroExtendToUInt64 +- [ ] LoadVectorUInt16ZeroExtendToInt32 +- [ ] LoadVectorUInt16ZeroExtendToInt64 +- [ ] LoadVectorUInt16ZeroExtendToUInt32 +- [ ] LoadVectorUInt16ZeroExtendToUInt64 +- [ ] LoadVectorUInt32NonFaultingZeroExtendToInt64 +- [ ] LoadVectorUInt32NonFaultingZeroExtendToUInt64 +- [ ] LoadVectorUInt32ZeroExtendToInt64 +- [ ] LoadVectorUInt32ZeroExtendToUInt64 +- [ ] LoadVectorx2 +- [ ] LoadVectorx3 +- [ ] LoadVectorx4 +- [ ] PrefetchBytes +- [ ] PrefetchInt16 +- [ ] PrefetchInt32 +- [ ] PrefetchInt64 + + +### [Sve gatherloads](https://github.com/dotnet/runtime/issues/94007) +- [ ] GatherPrefetch16Bit +- [ ] GatherPrefetch32Bit +- [ ] GatherPrefetch64Bit +- [ ] GatherPrefetch8Bit +- [ ] GatherVector +- [ ] GatherVectorByteZeroExtend +- [ ] GatherVectorInt16SignExtend +- [ ] GatherVectorInt16WithByteOffsetsSignExtend +- [ ] GatherVectorInt32SignExtend +- [ ] GatherVectorInt32WithByteOffsetsSignExtend +- [ ] GatherVectorSByteSignExtend +- [ ] GatherVectorUInt16WithByteOffsetsZeroExtend +- [ ] GatherVectorUInt16ZeroExtend +- [ ] GatherVectorUInt32WithByteOffsetsZeroExtend +- [ ] GatherVectorUInt32ZeroExtend +- [ ] GatherVectorWithByteOffsets + + +### [Sve fp](https://github.com/dotnet/runtime/issues/94005) +- [ ] AddRotateComplex +- [ ] AddSequentialAcross +- [ ] ConvertToDouble +- [ ] ConvertToInt32 +- [ ] ConvertToInt64 +- [ ] ConvertToSingle +- [ ] ConvertToUInt32 +- [ ] ConvertToUInt64 +- [ ] FloatingPointExponentialAccelerator +- [ ] MultiplyAddRotateComplex +- [ ] MultiplyAddRotateComplexBySelectedScalar +- [ ] ReciprocalEstimate +- [ ] ReciprocalExponent +- [ ] ReciprocalSqrtEstimate +- [ ] ReciprocalSqrtStep +- [ ] ReciprocalStep +- [ ] RoundAwayFromZero +- [ ] RoundToNearest +- [ ] RoundToNegativeInfinity +- [ ] RoundToPositiveInfinity +- [ ] RoundToZero +- [ ] Scale +- [ ] Sqrt +- [ ] TrigonometricMultiplyAddCoefficient +- [ ] TrigonometricSelectCoefficient +- [ ] TrigonometricStartingValue + + +### [Sve firstfaulting](https://github.com/dotnet/runtime/issues/94004) +- [ ] GatherVectorByteZeroExtendFirstFaulting +- [ ] GatherVectorFirstFaulting +- [ ] GatherVectorInt16SignExtendFirstFaulting +- [ ] GatherVectorInt16WithByteOffsetsSignExtendFirstFaulting +- [ ] GatherVectorInt32SignExtendFirstFaulting +- [ ] GatherVectorInt32WithByteOffsetsSignExtendFirstFaulting +- [ ] GatherVectorSByteSignExtendFirstFaulting +- [ ] GatherVectorUInt16WithByteOffsetsZeroExtendFirstFaulting +- [ ] GatherVectorUInt16ZeroExtendFirstFaulting +- [ ] GatherVectorUInt32WithByteOffsetsZeroExtendFirstFaulting +- [ ] GatherVectorUInt32ZeroExtendFirstFaulting +- [ ] GatherVectorWithByteOffsetFirstFaulting +- [ ] GetFfr +- [ ] LoadVectorByteZeroExtendFirstFaulting +- [ ] LoadVectorFirstFaulting +- [ ] LoadVectorInt16SignExtendFirstFaulting +- [ ] LoadVectorInt32SignExtendFirstFaulting +- [ ] LoadVectorSByteSignExtendFirstFaulting +- [ ] LoadVectorUInt16ZeroExtendFirstFaulting +- [ ] LoadVectorUInt32ZeroExtendFirstFaulting +- [ ] SetFfr + + +### [Sve counting](https://github.com/dotnet/runtime/issues/94003) +- [ ] Count16BitElements +- [ ] Count32BitElements +- [ ] Count64BitElements +- [ ] Count8BitElements +- [ ] GetActiveElementCount +- [ ] LeadingSignCount +- [ ] LeadingZeroCount +- [ ] PopCount +- [ ] SaturatingDecrementBy16BitElementCount +- [ 
] SaturatingDecrementBy32BitElementCount +- [ ] SaturatingDecrementBy64BitElementCount +- [ ] SaturatingDecrementBy8BitElementCount +- [ ] SaturatingDecrementByActiveElementCount +- [ ] SaturatingIncrementBy16BitElementCount +- [ ] SaturatingIncrementBy32BitElementCount +- [ ] SaturatingIncrementBy64BitElementCount +- [ ] SaturatingIncrementBy8BitElementCount +- [ ] SaturatingIncrementByActiveElementCount + + +### [Sve bitwise](https://github.com/dotnet/runtime/issues/93887) +- [ ] And +- [ ] AndAcross +- [ ] AndNot +- [ ] BitwiseClear +- [ ] BooleanNot +- [ ] InsertIntoShiftedVector +- [ ] Not +- [ ] Or +- [ ] OrAcross +- [ ] OrNot +- [ ] ShiftLeftLogical +- [ ] ShiftRightArithmetic +- [ ] ShiftRightArithmeticForDivide +- [ ] ShiftRightLogical +- [ ] Xor +- [ ] XorAcross + + +### [Sve bitmanipulate](https://github.com/dotnet/runtime/issues/94008) +- [ ] DuplicateSelectedScalarToVector +- [ ] ReverseBits +- [ ] ReverseElement +- [ ] ReverseElement16 +- [ ] ReverseElement32 +- [ ] ReverseElement8 +- [ ] Splice +- [ ] TransposeEven +- [ ] TransposeOdd +- [ ] UnzipEven +- [ ] UnzipOdd +- [ ] VectorTableLookup +- [ ] ZipHigh +- [ ] ZipLow + + +### [Sve2 scatterstores](https://github.com/dotnet/runtime/issues/94023) +- [ ] Scatter16BitNarrowing +- [ ] Scatter16BitWithByteOffsetsNarrowing +- [ ] Scatter32BitNarrowing +- [ ] Scatter32BitWithByteOffsetsNarrowing +- [ ] Scatter8BitNarrowing +- [ ] Scatter8BitWithByteOffsetsNarrowing +- [ ] ScatterNonTemporal + + +### [Sve2 maths](https://github.com/dotnet/runtime/issues/94022) +- [ ] AbsoluteDifferenceAdd +- [ ] AbsoluteDifferenceAddWideningLower +- [ ] AbsoluteDifferenceAddWideningUpper +- [ ] AbsoluteDifferenceWideningLower +- [ ] AbsoluteDifferenceWideningUpper +- [ ] AddCarryWideningLower +- [ ] AddCarryWideningUpper +- [ ] AddHighNarowingLower +- [ ] AddHighNarowingUpper +- [ ] AddPairwise +- [ ] AddPairwiseWidening +- [ ] AddSaturate +- [ ] AddSaturateWithSignedAddend +- [ ] AddSaturateWithUnsignedAddend +- [ ] AddWideLower +- [ ] AddWideUpper +- [ ] AddWideningLower +- [ ] AddWideningLowerUpper +- [ ] AddWideningUpper +- [ ] DotProductComplex +- [ ] HalvingAdd +- [ ] HalvingSubtract +- [ ] HalvingSubtractReversed +- [ ] MaxNumberPairwise +- [ ] MaxPairwise +- [ ] MinNumberPairwise +- [ ] MinPairwise +- [ ] MultiplyAddBySelectedScalar +- [ ] MultiplyAddWideningLower +- [ ] MultiplyAddWideningUpper +- [ ] MultiplyBySelectedScalar +- [ ] MultiplySubtractBySelectedScalar +- [ ] MultiplySubtractWideningLower +- [ ] MultiplySubtractWideningUpper +- [ ] MultiplyWideningLower +- [ ] MultiplyWideningUpper +- [ ] PolynomialMultiply +- [ ] PolynomialMultiplyWideningLower +- [ ] PolynomialMultiplyWideningUpper +- [ ] RoundingAddHighNarowingLower +- [ ] RoundingAddHighNarowingUpper +- [ ] RoundingHalvingAdd +- [ ] RoundingSubtractHighNarowingLower +- [ ] RoundingSubtractHighNarowingUpper +- [ ] SaturatingAbs +- [ ] SaturatingDoublingMultiplyAddWideningLower +- [ ] SaturatingDoublingMultiplyAddWideningLowerUpper +- [ ] SaturatingDoublingMultiplyAddWideningUpper +- [ ] SaturatingDoublingMultiplyHigh +- [ ] SaturatingDoublingMultiplySubtractWideningLower +- [ ] SaturatingDoublingMultiplySubtractWideningLowerUpper +- [ ] SaturatingDoublingMultiplySubtractWideningUpper +- [ ] SaturatingDoublingMultiplyWideningLower +- [ ] SaturatingDoublingMultiplyWideningUpper +- [ ] SaturatingNegate +- [ ] SaturatingRoundingDoublingMultiplyAddHigh +- [ ] SaturatingRoundingDoublingMultiplyHigh +- [ ] 
SaturatingRoundingDoublingMultiplySubtractHigh +- [ ] SubtractHighNarowingLower +- [ ] SubtractHighNarowingUpper +- [ ] SubtractSaturate +- [ ] SubtractSaturateReversed +- [ ] SubtractWideLower +- [ ] SubtractWideUpper +- [ ] SubtractWideningLower +- [ ] SubtractWideningLowerUpper +- [ ] SubtractWideningUpper +- [ ] SubtractWideningUpperLower +- [ ] SubtractWithBorrowWideningLower +- [ ] SubtractWithBorrowWideningUpper + + +### [Sve2 mask](https://github.com/dotnet/runtime/issues/94021) +- [ ] CreateWhileGreaterThanMask +- [ ] CreateWhileGreaterThanOrEqualMask +- [ ] CreateWhileReadAfterWriteMask +- [ ] CreateWhileWriteAfterReadMask +- [ ] Match +- [ ] NoMatch +- [ ] SaturatingExtractNarrowingLower +- [ ] SaturatingExtractNarrowingUpper +- [ ] SaturatingExtractUnsignedNarrowingLower +- [ ] SaturatingExtractUnsignedNarrowingUpper + + +### [Sve2 gatherloads](https://github.com/dotnet/runtime/issues/94019) +- [ ] GatherVectorByteZeroExtendNonTemporal +- [ ] GatherVectorInt16SignExtendNonTemporal +- [ ] GatherVectorInt16WithByteOffsetsSignExtendNonTemporal +- [ ] GatherVectorInt32SignExtendNonTemporal +- [ ] GatherVectorInt32WithByteOffsetsSignExtendNonTemporal +- [ ] GatherVectorNonTemporal +- [ ] GatherVectorSByteSignExtendNonTemporal +- [ ] GatherVectorUInt16WithByteOffsetsZeroExtendNonTemporal +- [ ] GatherVectorUInt16ZeroExtendNonTemporal +- [ ] GatherVectorUInt32WithByteOffsetsZeroExtendNonTemporal +- [ ] GatherVectorUInt32ZeroExtendNonTemporal + + +### [Sve2 fp](https://github.com/dotnet/runtime/issues/94018) +- [ ] AddRotateComplex +- [ ] DownConvertNarrowingUpper +- [ ] DownConvertRoundingOdd +- [ ] DownConvertRoundingOddUpper +- [ ] Log2 +- [ ] MultiplyAddRotateComplex +- [ ] MultiplyAddRotateComplexBySelectedScalar +- [ ] ReciprocalEstimate +- [ ] ReciprocalSqrtEstimate +- [ ] SaturatingComplexAddRotate +- [ ] SaturatingRoundingDoublingComplexMultiplyAddHighRotate +- [ ] UpConvertWideningUpper + + +### [Sve2 counting](https://github.com/dotnet/runtime/issues/94017) +- [ ] CountMatchingElements +- [ ] CountMatchingElementsIn128BitSegments + + +### [Sve2 bitwise](https://github.com/dotnet/runtime/issues/94015) +- [ ] BitwiseClearXor +- [ ] BitwiseSelect +- [ ] BitwiseSelectLeftInverted +- [ ] BitwiseSelectRightInverted +- [ ] ShiftArithmeticRounded +- [ ] ShiftArithmeticRoundedSaturate +- [ ] ShiftArithmeticSaturate +- [ ] ShiftLeftAndInsert +- [ ] ShiftLeftLogicalSaturate +- [ ] ShiftLeftLogicalSaturateUnsigned +- [ ] ShiftLeftLogicalWideningEven +- [ ] ShiftLeftLogicalWideningOdd +- [ ] ShiftLogicalRounded +- [ ] ShiftLogicalRoundedSaturate +- [ ] ShiftRightAndInsert +- [ ] ShiftRightArithmeticAdd +- [ ] ShiftRightArithmeticNarrowingSaturateEven +- [ ] ShiftRightArithmeticNarrowingSaturateOdd +- [ ] ShiftRightArithmeticNarrowingSaturateUnsignedEven +- [ ] ShiftRightArithmeticNarrowingSaturateUnsignedOdd +- [ ] ShiftRightArithmeticRounded +- [ ] ShiftRightArithmeticRoundedAdd +- [ ] ShiftRightArithmeticRoundedNarrowingSaturateEven +- [ ] ShiftRightArithmeticRoundedNarrowingSaturateOdd +- [ ] ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven +- [ ] ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd +- [ ] ShiftRightLogicalAdd +- [ ] ShiftRightLogicalNarrowingEven +- [ ] ShiftRightLogicalNarrowingOdd +- [ ] ShiftRightLogicalRounded +- [ ] ShiftRightLogicalRoundedAdd +- [ ] ShiftRightLogicalRoundedNarrowingEven +- [ ] ShiftRightLogicalRoundedNarrowingOdd +- [ ] ShiftRightLogicalRoundedNarrowingSaturateEven +- [ ] 
ShiftRightLogicalRoundedNarrowingSaturateOdd +- [ ] Xor +- [ ] XorRotateRight + + +### [Sve2 bitmanipulate](https://github.com/dotnet/runtime/issues/94020) +- [ ] InterleavingXorLowerUpper +- [ ] InterleavingXorUpperLower +- [ ] MoveWideningLower +- [ ] MoveWideningUpper +- [ ] VectorTableLookup +- [ ] VectorTableLookupExtension + + +### [SveBf16](https://github.com/dotnet/runtime/issues/94028) +- [ ] Bfloat16DotProduct +- [ ] Bfloat16MatrixMultiplyAccumulate +- [ ] Bfloat16MultiplyAddWideningToSinglePrecisionLower +- [ ] Bfloat16MultiplyAddWideningToSinglePrecisionUpper +- [ ] ConcatenateEvenInt128FromTwoInputs +- [ ] ConcatenateOddInt128FromTwoInputs +- [ ] ConditionalExtractAfterLastActiveElement +- [ ] ConditionalExtractAfterLastActiveElementAndReplicate +- [ ] ConditionalExtractLastActiveElement +- [ ] ConditionalExtractLastActiveElementAndReplicate +- [ ] ConditionalSelect +- [ ] ConvertToBFloat16 +- [ ] CreateFalseMaskBFloat16 +- [ ] CreateTrueMaskBFloat16 +- [ ] CreateWhileReadAfterWriteMask +- [ ] CreateWhileWriteAfterReadMask +- [ ] DotProductBySelectedScalar +- [ ] DownConvertNarrowingUpper +- [ ] DuplicateSelectedScalarToVector +- [ ] ExtractAfterLastScalar +- [ ] ExtractAfterLastVector +- [ ] ExtractLastScalar +- [ ] ExtractLastVector +- [ ] ExtractVector +- [ ] GetActiveElementCount +- [ ] InsertIntoShiftedVector +- [ ] InterleaveEvenInt128FromTwoInputs +- [ ] InterleaveInt128FromHighHalvesOfTwoInputs +- [ ] InterleaveInt128FromLowHalvesOfTwoInputs +- [ ] InterleaveOddInt128FromTwoInputs +- [ ] LoadVector +- [ ] LoadVector128AndReplicateToVector +- [ ] LoadVector256AndReplicateToVector +- [ ] LoadVectorFirstFaulting +- [ ] LoadVectorNonFaulting +- [ ] LoadVectorNonTemporal +- [ ] LoadVectorx2 +- [ ] LoadVectorx3 +- [ ] LoadVectorx4 +- [ ] PopCount +- [ ] ReverseElement +- [ ] Splice +- [ ] Store +- [ ] StoreNonTemporal +- [ ] TransposeEven +- [ ] TransposeOdd +- [ ] UnzipEven +- [ ] UnzipOdd +- [ ] VectorTableLookup +- [ ] VectorTableLookupExtension +- [ ] ZipHigh +- [ ] ZipLow + + +### [SveF32mm](https://github.com/dotnet/runtime/issues/94024) +- [ ] MatrixMultiplyAccumulate + + +### [SveF64mm](https://github.com/dotnet/runtime/issues/94025) +- [ ] ConcatenateEvenInt128FromTwoInputs +- [ ] ConcatenateOddInt128FromTwoInputs +- [ ] InterleaveEvenInt128FromTwoInputs +- [ ] InterleaveInt128FromHighHalvesOfTwoInputs +- [ ] InterleaveInt128FromLowHalvesOfTwoInputs +- [ ] InterleaveOddInt128FromTwoInputs +- [ ] LoadVector256AndReplicateToVector +- [ ] MatrixMultiplyAccumulate + + +### [SveFp16](https://github.com/dotnet/runtime/issues/94026) +- [ ] Abs +- [ ] AbsoluteCompareGreaterThan +- [ ] AbsoluteCompareGreaterThanOrEqual +- [ ] AbsoluteCompareLessThan +- [ ] AbsoluteCompareLessThanOrEqual +- [ ] AbsoluteDifference +- [ ] Add +- [ ] AddAcross +- [ ] AddPairwise +- [ ] AddRotateComplex +- [ ] AddSequentialAcross +- [ ] CompareEqual +- [ ] CompareGreaterThan +- [ ] CompareGreaterThanOrEqual +- [ ] CompareLessThan +- [ ] CompareLessThanOrEqual +- [ ] CompareNotEqualTo +- [ ] CompareUnordered +- [ ] ConcatenateEvenInt128FromTwoInputs +- [ ] ConcatenateOddInt128FromTwoInputs +- [ ] ConditionalExtractAfterLastActiveElement +- [ ] ConditionalExtractAfterLastActiveElementAndReplicate +- [ ] ConditionalExtractLastActiveElement +- [ ] ConditionalExtractLastActiveElementAndReplicate +- [ ] ConditionalSelect +- [ ] ConvertToDouble +- [ ] ConvertToHalf +- [ ] ConvertToInt16 +- [ ] ConvertToInt32 +- [ ] ConvertToInt64 +- [ ] ConvertToSingle +- [ ] 
ConvertToUInt16 +- [ ] ConvertToUInt32 +- [ ] ConvertToUInt64 +- [ ] CreateFalseMaskHalf +- [ ] CreateTrueMaskHalf +- [ ] CreateWhileReadAfterWriteMask +- [ ] CreateWhileWriteAfterReadMask +- [ ] Divide +- [ ] DownConvertNarrowingUpper +- [ ] DuplicateSelectedScalarToVector +- [ ] ExtractAfterLastScalar +- [ ] ExtractAfterLastVector +- [ ] ExtractLastScalar +- [ ] ExtractLastVector +- [ ] ExtractVector +- [ ] FloatingPointExponentialAccelerator +- [ ] FusedMultiplyAdd +- [ ] FusedMultiplyAddBySelectedScalar +- [ ] FusedMultiplyAddNegated +- [ ] FusedMultiplySubtract +- [ ] FusedMultiplySubtractBySelectedScalar +- [ ] FusedMultiplySubtractNegated +- [ ] GetActiveElementCount +- [ ] InsertIntoShiftedVector +- [ ] InterleaveEvenInt128FromTwoInputs +- [ ] InterleaveInt128FromHighHalvesOfTwoInputs +- [ ] InterleaveInt128FromLowHalvesOfTwoInputs +- [ ] InterleaveOddInt128FromTwoInputs +- [ ] LoadVector +- [ ] LoadVector128AndReplicateToVector +- [ ] LoadVector256AndReplicateToVector +- [ ] LoadVectorFirstFaulting +- [ ] LoadVectorNonFaulting +- [ ] LoadVectorNonTemporal +- [ ] LoadVectorx2 +- [ ] LoadVectorx3 +- [ ] LoadVectorx4 +- [ ] Log2 +- [ ] Max +- [ ] MaxAcross +- [ ] MaxNumber +- [ ] MaxNumberAcross +- [ ] MaxNumberPairwise +- [ ] MaxPairwise +- [ ] Min +- [ ] MinAcross +- [ ] MinNumber +- [ ] MinNumberAcross +- [ ] MinNumberPairwise +- [ ] MinPairwise +- [ ] Multiply +- [ ] MultiplyAddRotateComplex +- [ ] MultiplyAddRotateComplexBySelectedScalar +- [ ] MultiplyAddWideningLower +- [ ] MultiplyAddWideningUpper +- [ ] MultiplyBySelectedScalar +- [ ] MultiplyExtended +- [ ] MultiplySubtractWideningLower +- [ ] MultiplySubtractWideningUpper +- [ ] Negate +- [ ] PopCount +- [ ] ReciprocalEstimate +- [ ] ReciprocalExponent +- [ ] ReciprocalSqrtEstimate +- [ ] ReciprocalSqrtStep +- [ ] ReciprocalStep +- [ ] ReverseElement +- [ ] RoundAwayFromZero +- [ ] RoundToNearest +- [ ] RoundToNegativeInfinity +- [ ] RoundToPositiveInfinity +- [ ] RoundToZero +- [ ] Scale +- [ ] Splice +- [ ] Sqrt +- [ ] Store +- [ ] StoreNonTemporal +- [ ] Subtract +- [ ] TransposeEven +- [ ] TransposeOdd +- [ ] TrigonometricMultiplyAddCoefficient +- [ ] TrigonometricSelectCoefficient +- [ ] TrigonometricStartingValue +- [ ] UnzipEven +- [ ] UnzipOdd +- [ ] UpConvertWideningUpper +- [ ] VectorTableLookup +- [ ] VectorTableLookupExtension +- [ ] ZipHigh +- [ ] ZipLow + + +### [SveI8mm](https://github.com/dotnet/runtime/issues/94027) +- [ ] DotProductSignedUnsigned +- [ ] DotProductUnsignedSigned +- [ ] MatrixMultiplyAccumulate +- [ ] MatrixMultiplyAccumulateUnsignedSigned + + +### [Sha3](https://github.com/dotnet/runtime/issues/98692) +- [ ] BitwiseClearXor +- [ ] BitwiseRotateLeftBy1AndXor +- [ ] Xor +- [ ] XorRotateRight + + +### [Sm4](https://github.com/dotnet/runtime/issues/98696) +- [ ] Sm4EncryptionAndDecryption +- [ ] Sm4KeyUpdates + + +### [SveAes](https://github.com/dotnet/runtime/issues/94423) +- [ ] AesInverseMixColumns +- [ ] AesMixColumns +- [ ] AesSingleRoundDecryption +- [ ] AesSingleRoundEncryption +- [ ] PolynomialMultiplyWideningLower +- [ ] PolynomialMultiplyWideningUpper + + +### [SveBitperm](https://github.com/dotnet/runtime/issues/94424) +- [ ] GatherLowerBitsFromPositionsSelectedByBitmask +- [ ] GroupBitsToRightOrLeftAsSelectedByBitmask +- [ ] ScatterLowerBitsIntoPositionsSelectedByBitmask + + +### [SveSha3](https://github.com/dotnet/runtime/issues/94425) +- [ ] BitwiseRotateLeftBy1AndXor + + +### 
[SveSm4](https://github.com/dotnet/runtime/issues/94426) +- [ ] Sm4EncryptionAndDecryption +- [ ] Sm4KeyUpdates + + diff --git a/sve_api/post_review/apiraw_FEAT_SVE2__bitwise.cs b/sve_api/post_review/apiraw_FEAT_SVE2__bitwise.cs new file mode 100644 index 0000000000000..4fc99accb1e6c --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE2__bitwise.cs @@ -0,0 +1,119 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract class Sve2 : Sve /// Feature: FEAT_SVE2 Category: bitwise +{ + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + /// op1 ^ (op2 & ~op3) + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask); // BCAX // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right); // BSL // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right); // BSL1N // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right); // BSL2N // MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftArithmeticRounded(Vector value, Vector count); // SRSHL or SRSHLR // predicated, MOVPRFX + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector ShiftLogicalRounded(Vector value, Vector count); // URSHL or URSHLR // predicated, MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count); // SRSHR // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count); // URSHR // predicated, MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count); // SRSRA // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count); // URSRA // MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count); // RSHRNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); // RSHRNT + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count); // SQRSHL or SQRSHLR // predicated, MOVPRFX + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count); // UQRSHL or UQRSHLR // predicated, MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long] + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); // SQRSHRNB + + /// T: [sbyte, short], [short, int], [int, long] + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); // 
SQRSHRNT + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); // UQRSHRNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); // UQRSHRNT + + /// T: [byte, short], [ushort, int], [uint, long] + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count); // SQRSHRUNB + + /// T: [byte, short], [ushort, int], [uint, long] + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count); // SQRSHRUNT + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftArithmeticSaturate(Vector value, Vector count); // SQSHL or SQSHLR // predicated, MOVPRFX + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count); // UQSHL or UQSHLR // predicated, MOVPRFX + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count); // SQSHLU // predicated, MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); // SQSHRNB or UQSHRNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); // SQSHRNT or UQSHRNT + + /// T: [byte, short], [ushort, int], [uint, long] + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count); // SQSHRUNB + + /// T: [byte, short], [ushort, int], [uint, long] + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count); // SQSHRUNT + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); // SLI + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count); // SSHLLB or USHLLB + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count); // SSHLLT or USHLLT + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count); // SSRA // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, [ConstantExpected] byte count); // USRA // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); // SRI + + /// T: [sbyte, short], 
[short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count); // SHRNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); // SHRNT + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3); // EOR3 // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count); // XAR // MOVPRFX + + /// total method signatures: 33 +} diff --git a/sve_api/post_review/apiraw_FEAT_SVE2__bitwise_sorted.cs b/sve_api/post_review/apiraw_FEAT_SVE2__bitwise_sorted.cs new file mode 100644 index 0000000000000..b638df2402f10 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE2__bitwise_sorted.cs @@ -0,0 +1,119 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract class Sve2 : Sve /// Feature: FEAT_SVE2 Category: bitwise +{ + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + /// op1 ^ (op2 & ~op3) + public static unsafe Vector BitwiseClearXor(Vector xor, Vector value, Vector mask); // BCAX // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector BitwiseSelect(Vector select, Vector left, Vector right); // BSL // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector BitwiseSelectLeftInverted(Vector select, Vector left, Vector right); // BSL1N // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector BitwiseSelectRightInverted(Vector select, Vector left, Vector right); // BSL2N // MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftArithmeticRounded(Vector value, Vector count); // SRSHL or SRSHLR // predicated, MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftArithmeticRoundedSaturate(Vector value, Vector count); // SQRSHL or SQRSHLR // predicated, MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftArithmeticSaturate(Vector value, Vector count); // SQSHL or SQSHLR // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ShiftLeftAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); // SLI + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector ShiftLeftLogicalSaturate(Vector value, Vector count); // UQSHL or UQSHLR // predicated, MOVPRFX + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector ShiftLeftLogicalSaturateUnsigned(Vector value, [ConstantExpected] byte count); // SQSHLU // predicated, MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector ShiftLeftLogicalWideningEven(Vector value, [ConstantExpected] byte count); // SSHLLB or USHLLB + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector ShiftLeftLogicalWideningOdd(Vector value, [ConstantExpected] byte count); // SSHLLT or USHLLT + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static 
unsafe Vector ShiftLogicalRounded(Vector value, Vector count); // URSHL or URSHLR // predicated, MOVPRFX + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector ShiftLogicalRoundedSaturate(Vector value, Vector count); // UQRSHL or UQRSHLR // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ShiftRightAndInsert(Vector left, Vector right, [ConstantExpected] byte shift); // SRI + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftRightArithmeticAdd(Vector addend, Vector value, [ConstantExpected] byte count); // SSRA // MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); // SQSHRNB or UQSHRNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); // SQSHRNT or UQSHRNT + + /// T: [byte, short], [ushort, int], [uint, long] + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count); // SQSHRUNB + + /// T: [byte, short], [ushort, int], [uint, long] + public static unsafe Vector ShiftRightArithmeticNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count); // SQSHRUNT + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftRightArithmeticRounded(Vector value, [ConstantExpected] byte count); // SRSHR // predicated, MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftRightArithmeticRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count); // SRSRA // MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long] + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); // SQRSHRNB + + /// T: [sbyte, short], [short, int], [int, long] + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); // SQRSHRNT + + /// T: [byte, short], [ushort, int], [uint, long] + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedEven(Vector value, [ConstantExpected] byte count); // SQRSHRUNB + + /// T: [byte, short], [ushort, int], [uint, long] + public static unsafe Vector ShiftRightArithmeticRoundedNarrowingSaturateUnsignedOdd(Vector even, Vector value, [ConstantExpected] byte count); // SQRSHRUNT + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ShiftRightLogicalAdd(Vector addend, Vector value, [ConstantExpected] byte count); // USRA // MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalNarrowingEven(Vector value, [ConstantExpected] byte count); // SHRNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); // SHRNT + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ShiftRightLogicalRounded(Vector value, [ConstantExpected] byte count); // URSHR // predicated, MOVPRFX + + /// T: sbyte, short, int, 
long, byte, ushort, uint, ulong + public static unsafe Vector ShiftRightLogicalRoundedAdd(Vector addend, Vector value, [ConstantExpected] byte count); // URSRA // MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalRoundedNarrowingEven(Vector value, [ConstantExpected] byte count); // RSHRNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalRoundedNarrowingOdd(Vector even, Vector value, [ConstantExpected] byte count); // RSHRNT + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateEven(Vector value, [ConstantExpected] byte count); // UQRSHRNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector ShiftRightLogicalRoundedNarrowingSaturateOdd(Vector even, Vector value, [ConstantExpected] byte count); // UQRSHRNT + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Xor(Vector value1, Vector value2, Vector value3); // EOR3 // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector XorRotateRight(Vector left, Vector right, [ConstantExpected] byte count); // XAR // MOVPRFX + + /// total method signatures: 33 +} diff --git a/sve_api/post_review/apiraw_FEAT_SVE2__maths.cs b/sve_api/post_review/apiraw_FEAT_SVE2__maths.cs new file mode 100644 index 0000000000000..91abcd0ee1f2b --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE2__maths.cs @@ -0,0 +1,261 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract class Sve2 : Sve /// Feature: FEAT_SVE2 Category: maths +{ + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right); // SABA or UABA // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AbsoluteDifferenceWideningLowerAndAddEven(Vector addend, Vector left, Vector right); // SABALB or UABALB // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AbsoluteDifferenceWideningLowerAndAddOdd(Vector addend, Vector left, Vector right); // SABALT or UABALT // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AbsoluteDifferenceWideningEven(Vector left, Vector right); // SABDLB or UABDLB + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AbsoluteDifferenceWideningOdd(Vector left, Vector right); // SABDLT or UABDLT + + /// T: uint, ulong + public static unsafe Vector AddCarryWideningEven(Vector left, Vector right, Vector carry); // ADCLB // MOVPRFX + + /// T: uint, ulong + public static unsafe Vector AddCarryWideningOdd(Vector left, Vector right, Vector carry); // ADCLT // MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector AddHighNarrowingEven(Vector left, Vector right); // ADDHNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static 
unsafe Vector AddHighNarrowingOdd(Vector even, Vector left, Vector right); // ADDHNT + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AddPairwise(Vector left, Vector right); // FADDP or ADDP // predicated, MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AddPairwiseWideningAndAdd(Vector addend, Vector value); // SADALP or UADALP // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AddSaturate(Vector left, Vector right); // SQADD or UQADD // predicated, MOVPRFX + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector AddSaturate(Vector left, Vector right); // USQADD // predicated, MOVPRFX + + /// T: [sbyte, byte], [short, ushort], [int, uint], [long, ulong] + public static unsafe Vector AddSaturate(Vector left, Vector right); // SUQADD // predicated, MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AddWideningEven(Vector left, Vector right); // SADDWB or UADDWB + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AddWideningOdd(Vector left, Vector right); // SADDWT or UADDWT + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AddWideningEven(Vector left, Vector right); // SADDLB or UADDLB + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AddWideningOdd(Vector left, Vector right); // SADDLT or UADDLT + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector AddWideningEvenOdd(Vector leftEven, Vector rightOdd); // SADDLBT + + /// T: [int, sbyte], [long, short] + public static unsafe Vector DotProductRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); // CDOT // MOVPRFX + + /// T: [int, sbyte], [long, short] + public static unsafe Vector DotProductRotateComplexBySelectedIndex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation); // CDOT // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector FusedAddHalving(Vector left, Vector right); // SHADD or UHADD // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector FusedSubtractHalving(Vector left, Vector right); // SHSUB or UHSUB or SHSUBR or UHSUBR // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector MaxNumberPairwise(Vector left, Vector right); // FMAXNMP // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector MaxPairwise(Vector left, Vector right); // FMAXP or SMAXP or UMAXP // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector MinNumberPairwise(Vector left, Vector right); // FMINNMP // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector MinPairwise(Vector left, Vector right); // FMINP or SMINP or UMINP // predicated, MOVPRFX + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, 
[ConstantExpected] byte rightIndex); // MUL + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // MLA // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyWideningEvenAndAdd(Vector addend, Vector left, Vector right); // SMLALB or UMLALB // MOVPRFX + + /// T: [int, short], [long, int], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyBySelectedScalarWideningEvenAndAdd(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SMLALB or UMLALB // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyWideningOddAndAdd(Vector addend, Vector left, Vector right); // SMLALT or UMLALT // MOVPRFX + + /// T: [int, short], [long, int], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyBySelectedScalarWideningOddAndAdd(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SMLALT or UMLALT // MOVPRFX + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // MLS // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyWideningEvenAndSubtract(Vector minuend, Vector left, Vector right); // SMLSLB or UMLSLB // MOVPRFX + + /// T: [int, short], [long, int], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyBySelectedScalarWideningEvenAndSubtract(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SMLSLB or UMLSLB // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyWideningOddAndSubtract(Vector minuend, Vector left, Vector right); // SMLSLT or UMLSLT // MOVPRFX + + /// T: [int, short], [long, int], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyBySelectedScalarWideningOddAndSubtract(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SMLSLT or UMLSLT // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyWideningEven(Vector left, Vector right); // SMULLB or UMULLB + + /// T: [int, short], [long, int], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyBySelectedScalarWideningEven(Vector left, Vector right, [ConstantExpected] byte rightIndex); // SMULLB or UMULLB + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyWideningOdd(Vector left, Vector right); // SMULLT or UMULLT + + /// T: [int, short], [long, int], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyBySelectedScalarWideningOdd(Vector left, Vector right, [ConstantExpected] byte rightIndex); // SMULLT or UMULLT + + public static unsafe Vector PolynomialMultiply(Vector left, Vector right); // PMUL + public static unsafe Vector PolynomialMultiply(Vector left, Vector right); // PMUL + + /// T: [ushort, byte], [ulong, uint] + public static unsafe Vector PolynomialMultiplyWideningEven(Vector left, Vector right); // PMULLB +
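Throughout these SVE2 files the Even/Odd method pairs map to the B (bottom, even-numbered lanes) and T (top, odd-numbered lanes) forms of the underlying instruction: each reads every second element of the source and produces a full vector of double-width results. A scalar sketch of MultiplyWideningEven/MultiplyWideningOdd for the [long, int] pairing, with arrays standing in for scalable vectors (the helper names are illustrative only):

    // SMULLB model: multiply the even-numbered 32-bit lanes into 64-bit results.
    static long[] MultiplyWideningEvenModel(int[] left, int[] right)
    {
        var result = new long[left.Length / 2];
        for (int i = 0; i < result.Length; i++)
            result[i] = (long)left[2 * i] * right[2 * i];
        return result;
    }

    // SMULLT model: the same computation over the odd-numbered lanes.
    static long[] MultiplyWideningOddModel(int[] left, int[] right)
    {
        var result = new long[left.Length / 2];
        for (int i = 0; i < result.Length; i++)
            result[i] = (long)left[2 * i + 1] * right[2 * i + 1];
        return result;
    }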
+ /// T: [ushort, byte], [ulong, uint] + public static unsafe Vector PolynomialMultiplyWideningOdd(Vector left, Vector right); // PMULLT + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector AddRoundedHighNarrowingEven(Vector left, Vector right); // RADDHNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector AddRoundedHighNarrowingOdd(Vector even, Vector left, Vector right); // RADDHNT + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector FusedAddRoundedHalving(Vector left, Vector right); // SRHADD or URHADD // predicated, MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector SubtractRoundedHighNarrowingEven(Vector left, Vector right); // RSUBHNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector SubtractRoundedHighNarrowingOdd(Vector even, Vector left, Vector right); // RSUBHNT + + /// T: sbyte, short, int, long + public static unsafe Vector AbsSaturate(Vector value); // SQABS // predicated, MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningAndAddSaturateEven(Vector addend, Vector left, Vector right); // SQDMLALB // MOVPRFX + + /// T: [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningBySelectedScalarAndAddSaturateEven(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQDMLALB // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningAndAddSaturateEvenOdd(Vector addend, Vector leftEven, Vector rightOdd); // SQDMLALBT // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningAndAddSaturateOdd(Vector addend, Vector left, Vector right); // SQDMLALT // MOVPRFX + + /// T: [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningBySelectedScalarAndAddSaturateOdd(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQDMLALT // MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector MultiplyDoublingSaturateHigh(Vector left, Vector right); // SQDMULH + + /// T: short, int, long + public static unsafe Vector MultiplyDoublingBySelectedScalarSaturateHigh(Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQDMULH + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningAndSubtractSaturateEven(Vector minuend, Vector left, Vector right); // SQDMLSLB // MOVPRFX + + /// T: [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningBySelectedScalarAndSubtractSaturateEven(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQDMLSLB // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningAndSubtractSaturateEvenOdd(Vector minuend, Vector leftEven, Vector rightOdd); // SQDMLSLBT // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningAndSubtractSaturateOdd(Vector minuend, Vector left, Vector right); // SQDMLSLT // MOVPRFX + + /// T: [int, short], [long, int] + public static unsafe Vector 
MultiplyDoublingWideningBySelectedScalarAndSubtractSaturateOdd(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQDMLSLT // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningSaturateEven(Vector left, Vector right); // SQDMULLB + + /// T: [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningSaturateEvenBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQDMULLB + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningSaturateOdd(Vector left, Vector right); // SQDMULLT + + /// T: [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningSaturateOddBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQDMULLT + + /// T: sbyte, short, int, long + public static unsafe Vector NegateSaturate(Vector value); // SQNEG // predicated, MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector MultiplyRoundedDoublingSaturateAndAddHigh(Vector addend, Vector left, Vector right); // SQRDMLAH // MOVPRFX + + /// T: short, int, long + public static unsafe Vector MultiplyRoundedDoublingSaturateBySelectedScalarAndAddHigh(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQRDMLAH // MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector MultiplyRoundedDoublingSaturateHigh(Vector left, Vector right); // SQRDMULH + + /// T: short, int, long + public static unsafe Vector MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQRDMULH + + /// T: sbyte, short, int, long + public static unsafe Vector MultiplyRoundedDoublingSaturateAndSubtractHigh(Vector minuend, Vector left, Vector right); // SQRDMLSH // MOVPRFX + + /// T: short, int, long + public static unsafe Vector MultiplyRoundedDoublingSaturateBySelectedScalarAndSubtractHigh(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQRDMLSH // MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector SubtractHighNarrowingEven(Vector left, Vector right); // SUBHNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector SubtractHighNarrowingOdd(Vector even, Vector left, Vector right); // SUBHNT + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector SubtractSaturate(Vector left, Vector right); // SQSUB or UQSUB or SQSUBR or UQSUBR // predicated, MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector SubtractWideningEven(Vector left, Vector right); // SSUBWB or USUBWB + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector SubtractWideningOdd(Vector left, Vector right); // SSUBWT or USUBWT + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector SubtractWideningEven(Vector left, Vector right); // SSUBLB or USUBLB + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector SubtractWideningEvenOdd(Vector leftEven, Vector rightOdd); // SSUBLBT + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] 
+ public static unsafe Vector SubtractWideningOdd(Vector left, Vector right); // SSUBLT or USUBLT + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector SubtractWideningOddEven(Vector leftOdd, Vector rightEven); // SSUBLTB + + /// T: uint, ulong + public static unsafe Vector SubtractBorrowWideningEven(Vector left, Vector right, Vector borrow); // SBCLB // MOVPRFX + + /// T: uint, ulong + public static unsafe Vector SubtractBorrowWideningOdd(Vector left, Vector right, Vector borrow); // SBCLT // MOVPRFX +} diff --git a/sve_api/post_review/apiraw_FEAT_SVE2__maths_sorted.cs b/sve_api/post_review/apiraw_FEAT_SVE2__maths_sorted.cs new file mode 100644 index 0000000000000..91abcd0ee1f2b --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE2__maths_sorted.cs @@ -0,0 +1,261 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract class Sve2 : Sve /// Feature: FEAT_SVE2 Category: maths +{ + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AbsoluteDifferenceAdd(Vector addend, Vector left, Vector right); // SABA or UABA // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AbsoluteDifferenceWideningLowerAndAddEven(Vector addend, Vector left, Vector right); // SABALB or UABALB // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AbsoluteDifferenceWideningLowerAndAddOdd(Vector addend, Vector left, Vector right); // SABALT or UABALT // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AbsoluteDifferenceWideningEven(Vector left, Vector right); // SABDLB or UABDLB + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AbsoluteDifferenceWideningOdd(Vector left, Vector right); // SABDLT or UABDLT + + /// T: uint, ulong + public static unsafe Vector AddCarryWideningEven(Vector left, Vector right, Vector carry); // ADCLB // MOVPRFX + + /// T: uint, ulong + public static unsafe Vector AddCarryWideningOdd(Vector left, Vector right, Vector carry); // ADCLT // MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector AddHighNarrowingEven(Vector left, Vector right); // ADDHNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector AddHighNarrowingOdd(Vector even, Vector left, Vector right); // ADDHNT + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AddPairwise(Vector left, Vector right); // FADDP or ADDP // predicated, MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AddPairwiseWideningAndAdd(Vector addend, Vector value); // SADALP or UADALP // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AddSaturate(Vector left, Vector right); // SQADD or UQADD // predicated, MOVPRFX + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector AddSaturate(Vector left, Vector right); // USQADD // predicated, MOVPRFX + + /// T: [sbyte, byte], [short, ushort], [int, uint], [long, ulong] + public static 
unsafe Vector AddSaturate(Vector left, Vector right); // SUQADD // predicated, MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AddWideningEven(Vector left, Vector right); // SADDWB or UADDWB + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AddWideningOdd(Vector left, Vector right); // SADDWT or UADDWT + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AddWideningEven(Vector left, Vector right); // SADDLB or UADDLB + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector AddWideningOdd(Vector left, Vector right); // SADDLT or UADDLT + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector AddWideningEvenOdd(Vector leftEven, Vector rightOdd); // SADDLBT + + /// T: [int, sbyte], [long, short] + public static unsafe Vector DotProductRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); // CDOT // MOVPRFX + + /// T: [int, sbyte], [long, short] + public static unsafe Vector DotProductRotateComplexBySelectedIndex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation); // CDOT // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector FusedAddHalving(Vector left, Vector right); // SHADD or UHADD // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector FusedSubtractHalving(Vector left, Vector right); // SHSUB or UHSUB or SHSUBR or UHSUBR // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector MaxNumberPairwise(Vector left, Vector right); // FMAXNMP // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector MaxPairwise(Vector left, Vector right); // FMAXP or SMAXP or UMAXP // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector MinNumberPairwise(Vector left, Vector right); // FMINNMP // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector MinPairwise(Vector left, Vector right); // FMINP or SMINP or UMINP // predicated, MOVPRFX + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); // MUL + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector MultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // MLA // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyWideningEvenAndAdd(Vector addend, Vector left, Vector right); // SMLALB or UMLALB // MOVPRFX + + /// T: [int, short], [long, int], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyBySelectedScalarWideningEvenAndAdd(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SMLALB or UMLALB // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyWideningOddAndAdd(Vector addend, Vector left, Vector right); // SMLALT or 
UMLALT // MOVPRFX + + /// T: [int, short], [long, int], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyBySelectedScalarWideningOddAndAdd(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SMLALT or UMLALT // MOVPRFX + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector MultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // MLS // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyWideningEvenAndSubtract(Vector minuend, Vector left, Vector right); // SMLSLB or UMLSLB // MOVPRFX + + /// T: [int, short], [long, int], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyBySelectedScalarWideningEvenAndSubtract(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SMLSLB or UMLSLB // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyWideningOddAndSubtract(Vector minuend, Vector left, Vector right); // SMLSLT or UMLSLT // MOVPRFX + + /// T: [int, short], [long, int], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyBySelectedScalarWideningOddAndSubtract(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SMLSLT or UMLSLT // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyWideningEven(Vector left, Vector right); // SMULLB or UMULLB + + /// T: [int, short], [long, int], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyBySelectedScalarWideningEven(Vector left, Vector right, [ConstantExpected] byte rightIndex); // SMULLB or UMULLB + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyWideningOdd(Vector left, Vector right); // SMULLT or UMULLT + + /// T: [int, short], [long, int], [uint, ushort], [ulong, uint] + public static unsafe Vector MultiplyBySelectedScalarWideningOdd(Vector left, Vector right, [ConstantExpected] byte rightIndex); // SMULLT or UMULLT + + public static unsafe Vector PolynomialMultiply(Vector left, Vector right); // PMUL + public static unsafe Vector PolynomialMultiply(Vector left, Vector right); // PMUL + + /// T: [ushort, byte], [ulong, uint] + public static unsafe Vector PolynomialMultiplyWideningEven(Vector left, Vector right); // PMULLB + + /// T: [ushort, byte], [ulong, uint] + public static unsafe Vector PolynomialMultiplyWideningOdd(Vector left, Vector right); // PMULLT + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector AddRoundedHighNarrowingEven(Vector left, Vector right); // RADDHNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector AddRoundedHighNarrowingOdd(Vector even, Vector left, Vector right); // RADDHNT + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector FusedAddRoundedHalving(Vector left, Vector right); // SRHADD or URHADD // predicated, MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector SubtractRoundedHighNarrowingEven(Vector left, Vector right); // RSUBHNB + + /// T: 
[sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector SubtractRoundedHighNarrowingOdd(Vector even, Vector left, Vector right); // RSUBHNT + + /// T: sbyte, short, int, long + public static unsafe Vector AbsSaturate(Vector value); // SQABS // predicated, MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningAndAddSaturateEven(Vector addend, Vector left, Vector right); // SQDMLALB // MOVPRFX + + /// T: [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningBySelectedScalarAndAddSaturateEven(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQDMLALB // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningAndAddSaturateEvenOdd(Vector addend, Vector leftEven, Vector rightOdd); // SQDMLALBT // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningAndAddSaturateOdd(Vector addend, Vector left, Vector right); // SQDMLALT // MOVPRFX + + /// T: [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningBySelectedScalarAndAddSaturateOdd(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQDMLALT // MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector MultiplyDoublingSaturateHigh(Vector left, Vector right); // SQDMULH + + /// T: short, int, long + public static unsafe Vector MultiplyDoublingBySelectedScalarSaturateHigh(Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQDMULH + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningAndSubtractSaturateEven(Vector minuend, Vector left, Vector right); // SQDMLSLB // MOVPRFX + + /// T: [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningBySelectedScalarAndSubtractSaturateEven(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQDMLSLB // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningAndSubtractSaturateEvenOdd(Vector minuend, Vector leftEven, Vector rightOdd); // SQDMLSLBT // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningAndSubtractSaturateOdd(Vector minuend, Vector left, Vector right); // SQDMLSLT // MOVPRFX + + /// T: [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningBySelectedScalarAndSubtractSaturateOdd(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQDMLSLT // MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningSaturateEven(Vector left, Vector right); // SQDMULLB + + /// T: [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningSaturateEvenBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQDMULLB + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningSaturateOdd(Vector left, Vector right); // SQDMULLT + + /// T: [int, short], [long, int] + public static unsafe Vector MultiplyDoublingWideningSaturateOddBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQDMULLT +
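The doubling multiplies above return the high half of 2*a*b with saturation; the Rounded variants add a rounding constant before taking the high half. A scalar sketch for 16-bit elements (the element width and helper names are assumptions for illustration):

    // SQDMULH model: high half of the doubled product, saturated to Int16.
    static short MultiplyDoublingSaturateHighModel(short a, short b)
    {
        long doubled = 2L * a * b;              // can reach 2^31, so use 64 bits
        return (short)Math.Clamp(doubled >> 16, short.MinValue, short.MaxValue);
    }

    // SQRDMULH model: as above, but round before taking the high half.
    static short MultiplyRoundedDoublingSaturateHighModel(short a, short b)
    {
        long doubled = 2L * a * b + (1L << 15); // add the rounding constant
        return (short)Math.Clamp(doubled >> 16, short.MinValue, short.MaxValue);
    }

+ /// T: sbyte, short, int, long + public static unsafe Vector NegateSaturate(Vector value); // SQNEG //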
predicated, MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector MultiplyRoundedDoublingSaturateAndAddHigh(Vector addend, Vector left, Vector right); // SQRDMLAH // MOVPRFX + + /// T: short, int, long + public static unsafe Vector MultiplyRoundedDoublingSaturateBySelectedScalarAndAddHigh(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQRDMLAH // MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector MultiplyRoundedDoublingSaturateHigh(Vector left, Vector right); // SQRDMULH + + /// T: short, int, long + public static unsafe Vector MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQRDMULH + + /// T: sbyte, short, int, long + public static unsafe Vector MultiplyRoundedDoublingSaturateAndSubtractHigh(Vector minuend, Vector left, Vector right); // SQRDMLSH // MOVPRFX + + /// T: short, int, long + public static unsafe Vector MultiplyRoundedDoublingSaturateBySelectedScalarAndSubtractHigh(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); // SQRDMLSH // MOVPRFX + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector SubtractHighNarrowingEven(Vector left, Vector right); // SUBHNB + + /// T: [sbyte, short], [short, int], [int, long], [byte, ushort], [ushort, uint], [uint, ulong] + public static unsafe Vector SubtractHighNarrowingOdd(Vector even, Vector left, Vector right); // SUBHNT + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector SubtractSaturate(Vector left, Vector right); // SQSUB or UQSUB or SQSUBR or UQSUBR // predicated, MOVPRFX + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector SubtractWideningEven(Vector left, Vector right); // SSUBWB or USUBWB + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector SubtractWideningOdd(Vector left, Vector right); // SSUBWT or USUBWT + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector SubtractWideningEven(Vector left, Vector right); // SSUBLB or USUBLB + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector SubtractWideningEvenOdd(Vector leftEven, Vector rightOdd); // SSUBLBT + + /// T: [short, sbyte], [int, short], [long, int], [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector SubtractWideningOdd(Vector left, Vector right); // SSUBLT or USUBLT + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector SubtractWideningOddEven(Vector leftOdd, Vector rightEven); // SSUBLTB + + /// T: uint, ulong + public static unsafe Vector SubtractBorrowWideningEven(Vector left, Vector right, Vector borrow); // SBCLB // MOVPRFX + + /// T: uint, ulong + public static unsafe Vector SubtractBorrowWideningOdd(Vector left, Vector right, Vector borrow); // SBCLT // MOVPRFX +} diff --git a/sve_api/post_review/apiraw_FEAT_SVE__bitmanipulate.cs b/sve_api/post_review/apiraw_FEAT_SVE__bitmanipulate.cs new file mode 100644 index 0000000000000..a34c692f7cfa0 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__bitmanipulate.cs @@ -0,0 +1,105 @@ +namespace System.Numerics; + +partial class Vector +{ + public static Vector CreateSequence(T start, T step); +} + +partial class Vector64 +{ + public 
static Vector64 CreateSequence(T start, T step); +} + +partial class Vector128 +{ + public static Vector128 CreateSequence(T start, T step); +} + +partial class Vector256 +{ + public static Vector256 CreateSequence(T start, T step); +} + +partial class Vector512 +{ + public static Vector512 CreateSequence(T start, T step); +} + +partial class Vector +{ + public static Vector Indices { get; } +} + +partial class Vector64 +{ + public static Vector64 Indices { get; } +} + +partial class Vector128 +{ + public static Vector128 Indices { get; } +} + +partial class Vector256 +{ + public static Vector256 Indices { get; } +} + +partial class Vector512 +{ + public static Vector512 Indices { get; } +} + +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: bitmanipulate +{ + /// T: double, long, ulong, float, sbyte, short, int, byte, ushort, uint + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); // DUP or TBL + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ReverseBits(Vector value); // RBIT // predicated, MOVPRFX + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector ReverseElement8(Vector value); // REVB // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ReverseElement(Vector value); // REV + + /// T: int, long, uint, ulong + public static unsafe Vector ReverseElement16(Vector value); // REVH // predicated, MOVPRFX + + /// T: long, ulong + public static unsafe Vector ReverseElement32(Vector value); // REVW // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); // SPLICE // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector TransposeEven(Vector left, Vector right); // TRN1 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector TransposeOdd(Vector left, Vector right); // TRN2 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector UnzipEven(Vector left, Vector right); // UZP1 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector UnzipOdd(Vector left, Vector right); // UZP2 + + /// T: [float, uint], [double, ulong], [sbyte, byte], [short, ushort], [int, uint], [long, ulong] + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); // TBL + + /// T: byte, ushort, uint, ulong + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); // TBL + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ZipHigh(Vector left, Vector right); // ZIP2 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ZipLow(Vector left, Vector right); // ZIP1 + + /// total method signatures: 20 + +} \ No newline at end of file
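The CreateSequence and Indices members proposed above generalize an index vector: Indices is the sequence 0, 1, 2, ... and CreateSequence(start, step) is its affine generalization. Illustrative usage on the fixed-width types, assuming the members ship as proposed (System.Runtime.Intrinsics):

    // For Vector128<int> (four lanes):
    //   Vector128<int>.Indices          -> <0, 1, 2, 3>
    //   Vector128.CreateSequence(10, 2) -> <10, 12, 14, 16>
    // i.e. Indices is equivalent to CreateSequence(0, 1).
    Vector128<int> indices = Vector128<int>.Indices;
    Vector128<int> evens = Vector128.CreateSequence(10, 2);

diff --git a/sve_api/post_review/apiraw_FEAT_SVE__bitmanipulate_sorted.cs b/sve_api/post_review/apiraw_FEAT_SVE__bitmanipulate_sorted.cs new file mode 100644 index 0000000000000..820d34ffd9d87 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__bitmanipulate_sorted.cs @@ -0,0 +1,53 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT 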
Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: bitmanipulate +{ + /// T: double, long, ulong, float, sbyte, short, int, byte, ushort, uint + public static unsafe Vector DuplicateSelectedScalarToVector(Vector data, [ConstantExpected] byte index); // DUP or TBL + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ReverseBits(Vector value); // RBIT // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ReverseElement(Vector value); // REV + + /// T: int, long, uint, ulong + public static unsafe Vector ReverseElement16(Vector value); // REVH // predicated, MOVPRFX + + /// T: long, ulong + public static unsafe Vector ReverseElement32(Vector value); // REVW // predicated, MOVPRFX + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector ReverseElement8(Vector value); // REVB // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Splice(Vector mask, Vector left, Vector right); // SPLICE // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector TransposeEven(Vector left, Vector right); // TRN1 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector TransposeOdd(Vector left, Vector right); // TRN2 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector UnzipEven(Vector left, Vector right); // UZP1 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector UnzipOdd(Vector left, Vector right); // UZP2 + + /// T: [float, uint], [double, ulong], [sbyte, byte], [short, ushort], [int, uint], [long, ulong] + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); // TBL + + /// T: byte, ushort, uint, ulong + public static unsafe Vector VectorTableLookup(Vector data, Vector indices); // TBL + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ZipHigh(Vector left, Vector right); // ZIP2 + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ZipLow(Vector left, Vector right); // ZIP1 + + /// total method signatures: 20 + +} \ No newline at end of file diff --git a/sve_api/post_review/apiraw_FEAT_SVE__bitwise.cs b/sve_api/post_review/apiraw_FEAT_SVE__bitwise.cs new file mode 100644 index 0000000000000..f78ee5af6e6e1 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__bitwise.cs @@ -0,0 +1,60 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract class Sve : AdvSimd /// Feature: FEAT_SVE Category: bitwise +{ + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector And(Vector left, Vector right); // AND // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AndAcross(Vector value); // ANDV // predicated + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AndNot(Vector left, Vector right); // NAND // predicated + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector BitwiseClear(Vector left, Vector right); // BIC // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector 
BooleanNot(Vector value); // CNOT // predicated, MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector InsertIntoShiftedVector(Vector left, T right); // INSR + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Not(Vector value); // NOT or EOR // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Or(Vector left, Vector right); // ORR // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector OrAcross(Vector value); // ORV // predicated + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector OrNot(Vector left, Vector right); // NOR or ORN // predicated + + /// T: [sbyte, byte], [short, ushort], [int, uint], [long, ulong], [sbyte, ulong], [short, ulong], [int, ulong], [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); // LSL or LSLR // predicated, MOVPRFX + + /// T: byte, ushort, uint, ulong + public static unsafe Vector ShiftLeftLogical(Vector left, Vector right); // LSL or LSLR // predicated, MOVPRFX + + /// T: [sbyte, byte], [short, ushort], [int, uint], [long, ulong], [sbyte, ulong], [short, ulong], [int, ulong] + public static unsafe Vector ShiftRightArithmetic(Vector left, Vector right); // ASR or ASRR // predicated, MOVPRFX + + /// T: sbyte, short, int, long + public static unsafe Vector ShiftRightArithmeticForDivide(Vector value, [ConstantExpected] byte control); // ASRD // predicated, MOVPRFX + + /// T: byte, ushort, uint, ulong + public static unsafe Vector ShiftRightLogical(Vector left, Vector right); // LSR or LSRR // predicated, MOVPRFX + + /// T: [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector ShiftRightLogical(Vector left, Vector right); // LSR // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Xor(Vector left, Vector right); // EOR // predicated, MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector XorAcross(Vector value); // EORV // predicated + +} \ No newline at end of file diff --git a/sve_api/post_review/apiraw_FEAT_SVE__counting.cs b/sve_api/post_review/apiraw_FEAT_SVE__counting.cs new file mode 100644 index 0000000000000..95505290d08e6 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__counting.cs @@ -0,0 +1,162 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: counting +{ + public static unsafe ulong Count16BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // CNTH + public static unsafe ulong Count32BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // CNTW + public static unsafe ulong Count64BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // CNTD + public static unsafe ulong Count8BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // CNTB + + /// T: byte, ushort, uint, ulong, sbyte, short, int, long, float, double + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); // CNTP + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector LeadingSignCount(Vector value); // CLS // predicated, MOVPRFX + + /// T: [byte, sbyte], [ushort, short], [uint, 
int], [ulong, long] + public static unsafe Vector LeadingZeroCount(Vector value); // CLZ // predicated, MOVPRFX + + /// T: byte, ushort, uint, ulong + public static unsafe Vector LeadingZeroCount(Vector value); // CLZ // predicated, MOVPRFX + + /// T: [uint, float], [ulong, double], [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector PopCount(Vector value); // CNT // predicated, MOVPRFX + + /// T: byte, ushort, uint, ulong + public static unsafe Vector PopCount(Vector value); // CNT // predicated, MOVPRFX + + /// T: byte, ushort, uint, ulong + public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector from); // SQDECP + + /// T: byte, ushort, uint, ulong + public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector from); // SQDECP + + /// T: byte, ushort, uint, ulong + public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector from); // UQDECP + + /// T: byte, ushort, uint, ulong + public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector from); // UQDECP + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from); // SQDECP or UQDECP // MOVPRFX + + public static unsafe int SaturatingDecrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECB + + public static unsafe long SaturatingDecrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECB + + public static unsafe uint SaturatingDecrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECB + + public static unsafe ulong SaturatingDecrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECB + + public static unsafe int SaturatingDecrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECH + + public static unsafe long SaturatingDecrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECH + + public static unsafe uint SaturatingDecrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECH + + public static unsafe ulong SaturatingDecrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECH + + /// T: short, ushort + public static unsafe Vector SaturatingDecrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECH or UQDECH // MOVPRFX + + public static unsafe int SaturatingDecrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECW + + public static unsafe long SaturatingDecrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECW + + public static unsafe uint SaturatingDecrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = 
SveMaskPattern.All); // UQDECW + + public static unsafe ulong SaturatingDecrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECW + + /// T: int, uint + public static unsafe Vector SaturatingDecrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECW or UQDECW // MOVPRFX + + public static unsafe int SaturatingDecrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECD + + public static unsafe long SaturatingDecrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECD + + public static unsafe uint SaturatingDecrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECD + + public static unsafe ulong SaturatingDecrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECD + + /// T: long, ulong + public static unsafe Vector SaturatingDecrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECD or UQDECD // MOVPRFX + + /// T: byte, ushort, uint, ulong + public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector from); // SQINCP + + /// T: byte, ushort, uint, ulong + public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector from); // SQINCP + + /// T: byte, ushort, uint, ulong + public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector from); // UQINCP + + /// T: byte, ushort, uint, ulong + public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector from); // UQINCP + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector SaturatingIncrementByActiveElementCount(Vector value, Vector from); // SQINCP or UQINCP // MOVPRFX + + public static unsafe int SaturatingIncrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCB + + public static unsafe long SaturatingIncrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCB + + public static unsafe uint SaturatingIncrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCB + + public static unsafe ulong SaturatingIncrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCB + + public static unsafe int SaturatingIncrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCH + + public static unsafe long SaturatingIncrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCH + + public static unsafe uint SaturatingIncrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCH + + public static unsafe ulong SaturatingIncrementBy16BitElementCount(ulong 
value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCH + + /// T: short, ushort + public static unsafe Vector SaturatingIncrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCH or UQINCH // MOVPRFX + + public static unsafe int SaturatingIncrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCW + + public static unsafe long SaturatingIncrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCW + + public static unsafe uint SaturatingIncrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCW + + public static unsafe ulong SaturatingIncrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCW + + /// T: int, uint + public static unsafe Vector SaturatingIncrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCW or UQINCW // MOVPRFX + + public static unsafe int SaturatingIncrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCD + + public static unsafe long SaturatingIncrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCD + + public static unsafe uint SaturatingIncrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCD + + public static unsafe ulong SaturatingIncrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCD + + /// T: long, ulong + public static unsafe Vector SaturatingIncrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCD or UQINCD // MOVPRFX +} + + // All patterns used by PTRUE. + public enum SveMaskPattern : byte + { + LargestPowerOf2 = 0, // The largest power of 2. + VectorCount1 = 1, // 1 element. + VectorCount2 = 2, // 2 elements. + VectorCount3 = 3, // 3 elements. + VectorCount4 = 4, // 4 elements. + VectorCount5 = 5, // 5 elements. + VectorCount6 = 6, // 6 elements. + VectorCount7 = 7, // 7 elements. + VectorCount8 = 8, // 8 elements. + VectorCount16 = 9, // 16 elements. + VectorCount32 = 10, // 32 elements. + VectorCount64 = 11, // 64 elements. + VectorCount128 = 12, // 128 elements. + VectorCount256 = 13, // 256 elements. + LargestMultipleOf4 = 29, // The largest multiple of 4. + LargestMultipleOf3 = 30, // The largest multiple of 3. + All = 31 // All available (implicitly a multiple of two). 
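+ };

The pattern argument constrains how many elements the counting and saturating increment/decrement operations above report: a VectorCountN pattern yields N when N elements fit in the vector and 0 otherwise, while the LargestPowerOf2 and LargestMultipleOf3/4 patterns round the available element count down. A sketch of the expected values, assuming a hypothetical 256-bit SVE vector (sixteen 16-bit lanes):

    // Count16BitElements(SveMaskPattern.All)                -> 16
    // Count16BitElements(SveMaskPattern.LargestPowerOf2)    -> 16
    // Count16BitElements(SveMaskPattern.VectorCount7)       -> 7  (7 <= 16, so 7)
    // Count16BitElements(SveMaskPattern.VectorCount32)      -> 0  (32 does not fit)
    // Count16BitElements(SveMaskPattern.LargestMultipleOf3) -> 15
    ulong step = Sve.Count16BitElements(SveMaskPattern.VectorCount8);                        // 8
    long total = Sve.SaturatingIncrementBy16BitElementCount(0L, 1, SveMaskPattern.VectorCount8); // 0 + 1 * 8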
diff --git a/sve_api/post_review/apiraw_FEAT_SVE__counting_sorted.cs b/sve_api/post_review/apiraw_FEAT_SVE__counting_sorted.cs new file mode 100644 index 0000000000000..c2d758812ad4b --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__counting_sorted.cs @@ -0,0 +1,162 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: counting +{ + public static unsafe ulong Count16BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // CNTH + public static unsafe ulong Count32BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // CNTW + public static unsafe ulong Count64BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // CNTD + public static unsafe ulong Count8BitElements([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // CNTB + + /// T: byte, ushort, uint, ulong, sbyte, short, int, long, float, double + public static unsafe ulong GetActiveElementCount(Vector mask, Vector from); // CNTP + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector LeadingSignCount(Vector value); // CLS // predicated, MOVPRFX + + /// T: [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector LeadingZeroCount(Vector value); // CLZ // predicated, MOVPRFX + + /// T: byte, ushort, uint, ulong + public static unsafe Vector LeadingZeroCount(Vector value); // CLZ // predicated, MOVPRFX + + /// T: [uint, float], [ulong, double], [byte, sbyte], [ushort, short], [uint, int], [ulong, long] + public static unsafe Vector PopCount(Vector value); // CNT // predicated, MOVPRFX + + /// T: byte, ushort, uint, ulong + public static unsafe Vector PopCount(Vector value); // CNT // predicated, MOVPRFX + + public static unsafe int SaturatingDecrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECH + + public static unsafe long SaturatingDecrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECH + + public static unsafe uint SaturatingDecrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECH + + public static unsafe ulong SaturatingDecrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECH + + /// T: short, ushort + public static unsafe Vector SaturatingDecrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECH or UQDECH // MOVPRFX + + public static unsafe int SaturatingDecrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECW + + public static unsafe long SaturatingDecrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECW + + public static unsafe uint SaturatingDecrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECW + + public static unsafe ulong SaturatingDecrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern =
SveMaskPattern.All); // UQDECW + + /// T: int, uint + public static unsafe Vector SaturatingDecrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECW or UQDECW // MOVPRFX + + public static unsafe int SaturatingDecrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECD + + public static unsafe long SaturatingDecrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECD + + public static unsafe uint SaturatingDecrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECD + + public static unsafe ulong SaturatingDecrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECD + + /// T: long, ulong + public static unsafe Vector SaturatingDecrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECD or UQDECD // MOVPRFX + + public static unsafe int SaturatingDecrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECB + + public static unsafe long SaturatingDecrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQDECB + + public static unsafe uint SaturatingDecrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECB + + public static unsafe ulong SaturatingDecrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQDECB + + /// T: byte, ushort, uint, ulong + public static unsafe int SaturatingDecrementByActiveElementCount(int value, Vector from); // SQDECP + + /// T: byte, ushort, uint, ulong + public static unsafe long SaturatingDecrementByActiveElementCount(long value, Vector from); // SQDECP + + /// T: byte, ushort, uint, ulong + public static unsafe uint SaturatingDecrementByActiveElementCount(uint value, Vector from); // UQDECP + + /// T: byte, ushort, uint, ulong + public static unsafe ulong SaturatingDecrementByActiveElementCount(ulong value, Vector from); // UQDECP + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector SaturatingDecrementByActiveElementCount(Vector value, Vector from); // SQDECP or UQDECP // MOVPRFX + + public static unsafe int SaturatingIncrementBy16BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCH + + public static unsafe long SaturatingIncrementBy16BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCH + + public static unsafe uint SaturatingIncrementBy16BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCH + + public static unsafe ulong SaturatingIncrementBy16BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCH + + /// T: short, ushort + public static unsafe Vector 
SaturatingIncrementBy16BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCH or UQINCH // MOVPRFX + + public static unsafe int SaturatingIncrementBy32BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCW + + public static unsafe long SaturatingIncrementBy32BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCW + + public static unsafe uint SaturatingIncrementBy32BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCW + + public static unsafe ulong SaturatingIncrementBy32BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCW + + /// T: int, uint + public static unsafe Vector SaturatingIncrementBy32BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCW or UQINCW // MOVPRFX + + public static unsafe int SaturatingIncrementBy64BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCD + + public static unsafe long SaturatingIncrementBy64BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCD + + public static unsafe uint SaturatingIncrementBy64BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCD + + public static unsafe ulong SaturatingIncrementBy64BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCD + + /// T: long, ulong + public static unsafe Vector SaturatingIncrementBy64BitElementCount(Vector value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCD or UQINCD // MOVPRFX + + public static unsafe int SaturatingIncrementBy8BitElementCount(int value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCB + + public static unsafe long SaturatingIncrementBy8BitElementCount(long value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // SQINCB + + public static unsafe uint SaturatingIncrementBy8BitElementCount(uint value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCB + + public static unsafe ulong SaturatingIncrementBy8BitElementCount(ulong value, [ConstantExpected] byte scale, [ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // UQINCB + + /// T: byte, ushort, uint, ulong + public static unsafe int SaturatingIncrementByActiveElementCount(int value, Vector from); // SQINCP + + /// T: byte, ushort, uint, ulong + public static unsafe long SaturatingIncrementByActiveElementCount(long value, Vector from); // SQINCP + + /// T: byte, ushort, uint, ulong + public static unsafe uint SaturatingIncrementByActiveElementCount(uint value, Vector from); // UQINCP + + /// T: byte, ushort, uint, ulong + public static unsafe ulong SaturatingIncrementByActiveElementCount(ulong value, Vector from); // UQINCP + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector 
SaturatingIncrementByActiveElementCount(Vector value, Vector from); // SQINCP or UQINCP // MOVPRFX +} + + // All patterns used by PTRUE. + public enum SveMaskPattern : byte + { + LargestPowerOf2 = 0, // The largest power of 2. + VectorCount1 = 1, // 1 element. + VectorCount2 = 2, // 2 elements. + VectorCount3 = 3, // 3 elements. + VectorCount4 = 4, // 4 elements. + VectorCount5 = 5, // 5 elements. + VectorCount6 = 6, // 6 elements. + VectorCount7 = 7, // 7 elements. + VectorCount8 = 8, // 8 elements. + VectorCount16 = 9, // 16 elements. + VectorCount32 = 10, // 32 elements. + VectorCount64 = 11, // 64 elements. + VectorCount128 = 12, // 128 elements. + VectorCount256 = 13, // 256 elements. + LargestMultipleOf4 = 29, // The largest multiple of 4. + LargestMultipleOf3 = 30, // The largest multiple of 3. + All = 31 // All available (implicitly a multiple of two). + }; diff --git a/sve_api/post_review/apiraw_FEAT_SVE__firstfaulting.cs b/sve_api/post_review/apiraw_FEAT_SVE__firstfaulting.cs new file mode 100644 index 0000000000000..35b3c11f26915 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__firstfaulting.cs @@ -0,0 +1,146 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: firstfaulting +{ + + /// T: [int, uint], [long, ulong] + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1B + + /// T: uint, ulong + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1B + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorByteWithByteOffsetZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets); // LDFF1B + + /// T: [uint, int], [int, uint], [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorByteWithByteOffsetZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets); // LDFF1B + + /// T: [float, uint], [int, uint], [double, ulong], [long, ulong] + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses); // LDFF1W or LDFF1D + + /// T: uint, ulong + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses); // LDFF1W or LDFF1D + + /// T: [float, int], [uint, int], [float, uint], [int, uint], [double, long], [ulong, long], [double, ulong], [long, ulong] + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, T* address, Vector offsets); // LDFF1W or LDFF1D + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, T* address, Vector offsets); // LDFF1W or LDFF1D + + /// T: [float, int], [uint, int], [float, uint], [int, uint], [double, long], [ulong, long], [double, ulong], [long, ulong] + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, T* address, Vector indices); // LDFF1W or LDFF1D + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, T* address, Vector indices); // LDFF1W or LDFF1D + + /// T: [int, uint], [long, ulong] + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SH + + /// T: uint, ulong + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SH + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorInt16WithByteOffsetSignExtendFirstFaulting(Vector mask, short* address, Vector 
offsets); // LDFF1SH + + /// T: [uint, int], [int, uint], [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorInt16WithByteOffsetSignExtendFirstFaulting(Vector mask, short* address, Vector offsets); // LDFF1SH + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices); // LDFF1SH + + /// T: [uint, int], [int, uint], [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices); // LDFF1SH + + /// T: [int, uint], [long, ulong] + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1H + + /// T: uint, ulong + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1H + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorUInt16WithByteOffsetZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets); // LDFF1H + + /// T: [uint, int], [int, uint], [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorUInt16WithByteOffsetZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets); // LDFF1H + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices); // LDFF1H + + /// T: [uint, int], [int, uint], [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices); // LDFF1H + + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SW + + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SW + + /// T: long, ulong + public static unsafe Vector GatherVectorInt32WithByteOffsetSignExtendFirstFaulting(Vector mask, int* address, Vector offsets); // LDFF1SW + + /// T: [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorInt32WithByteOffsetSignExtendFirstFaulting(Vector mask, int* address, Vector offsets); // LDFF1SW + + /// T: long, ulong + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices); // LDFF1SW + + /// T: [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices); // LDFF1SW + + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1W + + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1W + + /// T: long, ulong + public static unsafe Vector GatherVectorUInt32WithByteOffsetZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets); // LDFF1W + + /// T: [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorUInt32WithByteOffsetZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets); // LDFF1W + + /// T: long, ulong + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices); // LDFF1W + + /// T: [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices); // LDFF1W + + /// T: [int, uint], [long, ulong] + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SB 
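All of the *FirstFaulting entries in this file share one contract: only the first faulting lane raises; lanes at and beyond it are simply marked inactive in the first-fault register, and the listing continues below with the remaining overloads. A sketch of the SETFFR / LDFF1B / RDFFR idiom these signatures imply (LoadAccessible is a hypothetical helper; CreateTrueMaskByte is assumed from the mask helpers, and GetFfr is written with an explicit type argument where the sketch elides it):

// Load as many bytes as are actually accessible starting at `address`.
static unsafe Vector<byte> LoadAccessible(byte* address, out Vector<byte> validLanes)
{
    Sve.SetFfr(Sve.CreateTrueMaskByte());                     // prime the FFR: all lanes valid
    Vector<byte> data = Sve.LoadVectorFirstFaulting(address); // LDFF1B, stops at a fault
    validLanes = Sve.GetFfr<byte>();                          // lanes past the fault read as inactive
    return data;
}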
+ + /// T: uint, ulong + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SB + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets); // LDFF1SB + + /// T: [uint, int], [int, uint], [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets); // LDFF1SB + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector GetFfr(); // RDFFR // predicated + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(byte* address); // LDFF1B // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector LoadVectorFirstFaulting(T* address); // LDFF1W or LDFF1D or LDFF1B or LDFF1H // predicated + + /// T: int, long, uint, ulong + public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(short* address); // LDFF1SH // predicated + + /// T: int, long, uint, ulong + public static unsafe Vector LoadVectorUInt16ZeroExtendFirstFaulting(ushort* address); // LDFF1H // predicated + + /// T: long, ulong + public static unsafe Vector LoadVectorInt32SignExtendFirstFaulting(int* address); // LDFF1SW // predicated + + /// T: long, ulong + public static unsafe Vector LoadVectorUInt32ZeroExtendFirstFaulting(uint* address); // LDFF1W // predicated + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(sbyte* address); // LDFF1SB // predicated + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void SetFfr(Vector value); // WRFFR + + /// total method signatures: 72 + +} diff --git a/sve_api/post_review/apiraw_FEAT_SVE__firstfaulting_sorted.cs b/sve_api/post_review/apiraw_FEAT_SVE__firstfaulting_sorted.cs new file mode 100644 index 0000000000000..ebdf3cc66c6df --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__firstfaulting_sorted.cs @@ -0,0 +1,145 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: firstfaulting +{ + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorByteWithByteOffsetZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets); // LDFF1B + + /// T: [uint, int], [int, uint], [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorByteWithByteOffsetZeroExtendFirstFaulting(Vector mask, byte* address, Vector offsets); // LDFF1B + + /// T: [int, uint], [long, ulong] + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1B + + /// T: uint, ulong + public static unsafe Vector GatherVectorByteZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1B + + /// T: [float, uint], [int, uint], [double, ulong], [long, ulong] + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses); // LDFF1W or LDFF1D + + /// T: uint, ulong + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, Vector addresses); // LDFF1W or LDFF1D + + /// T: [float, int], [uint, int], [float, uint], [int, uint], [double, long], [ulong, long], [double, ulong], [long, ulong] + public static unsafe Vector GatherVectorFirstFaulting(Vector mask, T* address, Vector indices); // LDFF1W or LDFF1D + + /// T: int, uint, long, ulong + 
public static unsafe Vector GatherVectorFirstFaulting(Vector mask, T* address, Vector indices); // LDFF1W or LDFF1D + + /// T: [int, uint], [long, ulong] + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SH + + /// T: uint, ulong + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SH + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices); // LDFF1SH + + /// T: [uint, int], [int, uint], [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorInt16SignExtendFirstFaulting(Vector mask, short* address, Vector indices); // LDFF1SH + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorInt16WithByteOffsetSignExtendFirstFaulting(Vector mask, short* address, Vector offsets); // LDFF1SH + + /// T: [uint, int], [int, uint], [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorInt16WithByteOffsetSignExtendFirstFaulting(Vector mask, short* address, Vector offsets); // LDFF1SH + + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SW + + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SW + + /// T: long, ulong + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices); // LDFF1SW + + /// T: [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorInt32SignExtendFirstFaulting(Vector mask, int* address, Vector indices); // LDFF1SW + + /// T: long, ulong + public static unsafe Vector GatherVectorInt32WithByteOffsetSignExtendFirstFaulting(Vector mask, int* address, Vector offsets); // LDFF1SW + + /// T: [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorInt32WithByteOffsetSignExtendFirstFaulting(Vector mask, int* address, Vector offsets); // LDFF1SW + + /// T: [int, uint], [long, ulong] + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SB + + /// T: uint, ulong + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1SB + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets); // LDFF1SB + + /// T: [uint, int], [int, uint], [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorSByteSignExtendFirstFaulting(Vector mask, sbyte* address, Vector offsets); // LDFF1SB + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorUInt16WithByteOffsetZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets); // LDFF1H + + /// T: [uint, int], [int, uint], [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorUInt16WithByteOffsetZeroExtendFirstFaulting(Vector mask, ushort* address, Vector offsets); // LDFF1H + + /// T: [int, uint], [long, ulong] + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1H + + /// T: uint, ulong + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1H + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices); // LDFF1H + + /// T: [uint, int], [int, uint], 
[ulong, long], [long, ulong] + public static unsafe Vector GatherVectorUInt16ZeroExtendFirstFaulting(Vector mask, ushort* address, Vector indices); // LDFF1H + + /// T: long, ulong + public static unsafe Vector GatherVectorUInt32WithByteOffsetZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets); // LDFF1W + + /// T: [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorUInt32WithByteOffsetZeroExtendFirstFaulting(Vector mask, uint* address, Vector offsets); // LDFF1W + + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1W + + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, Vector addresses); // LDFF1W + + /// T: long, ulong + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices); // LDFF1W + + /// T: [ulong, long], [long, ulong] + public static unsafe Vector GatherVectorUInt32ZeroExtendFirstFaulting(Vector mask, uint* address, Vector indices); // LDFF1W + + /// T: [float, int], [uint, int], [float, uint], [int, uint], [double, long], [ulong, long], [double, ulong], [long, ulong] + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, T* address, Vector offsets); // LDFF1W or LDFF1D + + /// T: int, uint, long, ulong + public static unsafe Vector GatherVectorWithByteOffsetFirstFaulting(Vector mask, T* address, Vector offsets); // LDFF1W or LDFF1D + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector GetFfr(); // RDFFR // predicated + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector LoadVectorByteZeroExtendFirstFaulting(byte* address); // LDFF1B // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector LoadVectorFirstFaulting(T* address); // LDFF1W or LDFF1D or LDFF1B or LDFF1H // predicated + + /// T: int, long, uint, ulong + public static unsafe Vector LoadVectorInt16SignExtendFirstFaulting(short* address); // LDFF1SH // predicated + + /// T: long, ulong + public static unsafe Vector LoadVectorInt32SignExtendFirstFaulting(int* address); // LDFF1SW // predicated + + /// T: short, int, long, ushort, uint, ulong + public static unsafe Vector LoadVectorSByteSignExtendFirstFaulting(sbyte* address); // LDFF1SB // predicated + + /// T: int, long, uint, ulong + public static unsafe Vector LoadVectorUInt16ZeroExtendFirstFaulting(ushort* address); // LDFF1H // predicated + + /// T: long, ulong + public static unsafe Vector LoadVectorUInt32ZeroExtendFirstFaulting(uint* address); // LDFF1W // predicated + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void SetFfr(Vector value); // WRFFR + + /// total method signatures: 72 + +} diff --git a/sve_api/post_review/apiraw_FEAT_SVE__fp.cs b/sve_api/post_review/apiraw_FEAT_SVE__fp.cs new file mode 100644 index 0000000000000..b8e283c7e369e --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__fp.cs @@ -0,0 +1,82 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract class Sve : AdvSimd /// Feature: FEAT_SVE Category: fp +{ + /// T: float, double + public static unsafe Vector AddRotateComplex(Vector left, Vector right, [ConstantExpected] byte rotation); // FCADD // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector AddSequentialAcross(Vector initial, Vector value); // FADDA // predicated + + /// T: [double, float], [double, int], 
[double, long], [double, uint], [double, ulong] + public static unsafe Vector ConvertToDouble(Vector value); // FCVT or SCVTF or UCVTF // predicated, MOVPRFX + + /// T: [int, float], [int, double] + public static unsafe Vector ConvertToInt32(Vector value); // FCVTZS // predicated, MOVPRFX + + /// T: [long, float], [long, double] + public static unsafe Vector ConvertToInt64(Vector value); // FCVTZS // predicated, MOVPRFX + + /// T: [float, double], [float, int], [float, long], [float, uint], [float, ulong] + public static unsafe Vector ConvertToSingle(Vector value); // FCVT or SCVTF or UCVTF // predicated, MOVPRFX + + /// T: [uint, float], [uint, double] + public static unsafe Vector ConvertToUInt32(Vector value); // FCVTZU // predicated, MOVPRFX + + /// T: [ulong, float], [ulong, double] + public static unsafe Vector ConvertToUInt64(Vector value); // FCVTZU // predicated, MOVPRFX + + /// T: [float, uint], [double, ulong] + public static unsafe Vector FloatingPointExponentialAccelerator(Vector value); // FEXPA + + /// T: float, double + public static unsafe Vector MultiplyAddRotateComplex(Vector addend, Vector left, Vector right, [ConstantExpected] byte rotation); // FCMLA // predicated, MOVPRFX + + public static unsafe Vector MultiplyAddRotateComplexBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex, [ConstantExpected] byte rotation); // FCMLA // MOVPRFX + + /// T: float, double + public static unsafe Vector ReciprocalEstimate(Vector value); // FRECPE + + /// T: float, double + public static unsafe Vector ReciprocalExponent(Vector value); // FRECPX // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector ReciprocalSqrtEstimate(Vector value); // FRSQRTE + + /// T: float, double + public static unsafe Vector ReciprocalSqrtStep(Vector left, Vector right); // FRSQRTS + + /// T: float, double + public static unsafe Vector ReciprocalStep(Vector left, Vector right); // FRECPS + + /// T: float, double + public static unsafe Vector RoundAwayFromZero(Vector value); // FRINTA // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector RoundToNearest(Vector value); // FRINTN // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector RoundToNegativeInfinity(Vector value); // FRINTM // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector RoundToPositiveInfinity(Vector value); // FRINTP // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector RoundToZero(Vector value); // FRINTZ // predicated, MOVPRFX + + /// T: [float, int], [double, long] + public static unsafe Vector Scale(Vector left, Vector right); // FSCALE // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector Sqrt(Vector value); // FSQRT // predicated, MOVPRFX + + /// T: float, double + public static unsafe Vector TrigonometricMultiplyAddCoefficient(Vector left, Vector right, [ConstantExpected] byte control); // FTMAD // MOVPRFX + + /// T: [float, uint], [double, ulong] + public static unsafe Vector TrigonometricSelectCoefficient(Vector value, Vector selector); // FTSSEL + + /// T: [float, uint], [double, ulong] + public static unsafe Vector TrigonometricStartingValue(Vector value, Vector sign); // FTSMUL +} \ No newline at end of file
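ReciprocalEstimate/ReciprocalStep above follow the usual Newton-Raphson pairing: FRECPE seeds roughly 8 bits of 1/x, and each FRECPS step, which evaluates 2 - a*b, lets one fused multiply roughly double that precision. A sketch of a single refinement round, assuming the surface above plus a Multiply from the arithmetic category (not shown in this file); FastReciprocal is a hypothetical helper:

// One Newton-Raphson iteration: est' = est * (2 - value * est).
static Vector<float> FastReciprocal(Vector<float> value)
{
    Vector<float> est = Sve.ReciprocalEstimate(value);       // FRECPE, ~8-bit seed
    est = Sve.Multiply(est, Sve.ReciprocalStep(value, est)); // FRECPS refinement
    return est;
}

Repeating the refinement line once more brings the result close to full single precision.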
diff --git a/sve_api/post_review/apiraw_FEAT_SVE__gather_loads.cs b/sve_api/post_review/apiraw_FEAT_SVE__gather_loads.cs new file mode 100644 index 0000000000000..32da944c82ae6 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__gather_loads.cs @@ -0,0 +1,101 @@ +namespace System.Runtime.Intrinsics.Arm; + +public enum SvePrefetchType +{ + LoadL1Temporal = 0, + LoadL1NonTemporal = 1, + LoadL2Temporal = 2, + LoadL2NonTemporal = 3, + LoadL3Temporal = 4, + LoadL3NonTemporal = 5, + StoreL1Temporal = 8, + StoreL1NonTemporal = 9, + StoreL2Temporal = 10, + StoreL2NonTemporal = 11, + StoreL3Temporal = 12, + StoreL3NonTemporal = 13 +}; + +public abstract partial class Sve : AdvSimd +{ + /// T: [byte, uint], [sbyte, uint], [byte, ulong], [sbyte, ulong] + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [byte, int], [sbyte, int], [byte, uint], [sbyte, uint], [byte, long], [sbyte, long], [byte, ulong], [sbyte, ulong] + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [short, uint], [ushort, uint], [short, ulong], [ushort, ulong] + public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [short, int], [ushort, int], [short, uint], [ushort, uint], [short, long], [ushort, long], [short, ulong], [ushort, ulong] + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [int, uint], [uint, uint], [int, ulong], [uint, ulong] + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [int, long], [uint, long], [int, ulong], [uint, ulong] + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [long, uint], [ulong, uint], [long, ulong], [ulong, ulong] + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [long, int], [ulong, int], [long, uint], [ulong, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [float, uint], [int, uint], [uint, uint], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVector(Vector mask, Vector addresses); + + /// T: [float, int], [int, int], [uint, int], [float, uint], [int, uint], [uint, uint], [double, long], [long, long], [ulong, long], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVector(Vector mask, T* address, Vector indices); + + /// T: [float, int], [int, int], [uint, int], [float, uint], [int, uint], [uint, uint], [double, long], [long, long], [ulong, long], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, T* address, Vector offsets); + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector addresses); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices); + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask,
Vector addresses); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets); + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets); + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets); + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets); + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector addresses); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices); +}
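The split above between GatherVector and GatherVectorWithByteOffsets is the indices-versus-offsets distinction: the former scales the index vector by the element size, the latter adds the vector as raw byte displacements. A sketch, assuming the proposed surface; GatherInts is a hypothetical helper and CreateTrueMaskInt32 is assumed from the mask category:

// Gathers table[indices[i]] into each active 32-bit lane.
static unsafe Vector<int> GatherInts(int* table, Vector<int> indices)
{
    Vector<int> mask = Sve.CreateTrueMaskInt32();  // assumed helper, all lanes active
    return Sve.GatherVector(mask, table, indices); // indices scaled by sizeof(int)
}

With GatherVectorWithByteOffsets the same call would need the offsets pre-multiplied by sizeof(int).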
diff --git a/sve_api/post_review/apiraw_FEAT_SVE__gather_loads_sorted.cs b/sve_api/post_review/apiraw_FEAT_SVE__gather_loads_sorted.cs new file mode 100644 index 0000000000000..f2ff7902a1ab3 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__gather_loads_sorted.cs @@ -0,0 +1,103 @@ +namespace System.Runtime.Intrinsics.Arm; + +public abstract partial class Sve : AdvSimd +{ + + /// T: [short, uint], [ushort, uint], [short, ulong], [ushort, ulong] + public static unsafe void GatherPrefetch16Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [short, int], [ushort, int], [short, uint], [ushort, uint], [short, long], [ushort, long], [short, ulong], [ushort, ulong] + public static unsafe void GatherPrefetch16Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [int, uint], [uint, uint], [int, ulong], [uint, ulong] + public static unsafe void GatherPrefetch32Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [int, long], [uint, long], [int, ulong], [uint, ulong] + public static unsafe void GatherPrefetch32Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [long, uint], [ulong, uint], [long, ulong], [ulong, ulong] + public static unsafe void GatherPrefetch64Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [long, int], [ulong, int], [long, uint], [ulong, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe void GatherPrefetch64Bit(Vector mask, void* address, Vector indices, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [byte, uint], [sbyte, uint], [byte, ulong], [sbyte, ulong] + public static unsafe void GatherPrefetch8Bit(Vector mask, Vector addresses, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [byte, int], [sbyte, int], [byte, uint], [sbyte, uint], [byte, long], [sbyte, long], [byte, ulong], [sbyte, ulong] + public static unsafe void GatherPrefetch8Bit(Vector mask, void* address, Vector offsets, [ConstantExpected] SvePrefetchType prefetchType); + + /// T: [float, uint], [int, uint], [uint, uint], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVector(Vector mask, Vector addresses); + + /// T: [float, int], [int, int], [uint, int], [float, uint], [int, uint], [uint, uint], [double, long], [long, long], [ulong, long], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVector(Vector mask, T* address, Vector indices); + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, Vector addresses); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorByteZeroExtend(Vector mask, byte* address, Vector indices); + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, Vector addresses); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt16SignExtend(Vector mask, short* address, Vector indices); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt16WithByteOffsetsSignExtend(Vector mask, short* address, Vector offsets); + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, Vector addresses); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt32SignExtend(Vector mask, int* address, Vector indices); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorInt32WithByteOffsetsSignExtend(Vector mask, int* address, Vector offsets); + + /// T: [int, uint], [uint,
uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, Vector addresses); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorSByteSignExtend(Vector mask, sbyte* address, Vector indices); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt16WithByteOffsetsZeroExtend(Vector mask, ushort* address, Vector offsets); + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, Vector addresses); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt16ZeroExtend(Vector mask, ushort* address, Vector indices); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt32WithByteOffsetsZeroExtend(Vector mask, uint* address, Vector offsets); + + /// T: [int, uint], [uint, uint], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, Vector addresses); + + /// T: [int, int], [uint, int], [int, uint], [uint, uint], [long, long], [ulong, long], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorUInt32ZeroExtend(Vector mask, uint* address, Vector indices); + + /// T: [float, int], [int, int], [uint, int], [float, uint], [int, uint], [uint, uint], [double, long], [long, long], [ulong, long], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe Vector GatherVectorWithByteOffsets(Vector mask, T* address, Vector offsets); + +} + +public enum SvePrefetchType +{ + LoadL1Temporal = 0, + LoadL1NonTemporal = 1, + LoadL2Temporal = 2, + LoadL2NonTemporal = 3, + LoadL3Temporal = 4, + LoadL3NonTemporal = 5, + StoreL1Temporal = 8, + StoreL1NonTemporal = 9, + StoreL2Temporal = 10, + StoreL2NonTemporal = 11, + StoreL3Temporal = 12, + StoreL3NonTemporal = 13 +};
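SvePrefetchType simply encodes the prefetch-operation immediate of the PRF* instructions: target cache level (L1-L3) crossed with temporal/non-temporal, in separate load and store variants, which is why the values jump from 5 to 8. Sketched usage against the sorted surface above (PrefetchRows is a hypothetical helper; CreateTrueMaskInt32 is assumed from the mask category):

// Hints that the rows addressed by `indices` will be read soon (L1, temporal).
static unsafe void PrefetchRows(int* table, Vector<int> indices)
{
    Vector<int> mask = Sve.CreateTrueMaskInt32(); // assumed helper
    Sve.GatherPrefetch32Bit(mask, table, indices, SvePrefetchType.LoadL1Temporal);
}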
diff --git a/sve_api/post_review/apiraw_FEAT_SVE__loads.cs b/sve_api/post_review/apiraw_FEAT_SVE__loads.cs new file mode 100644 index 0000000000000..4d9564f6c4b84 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__loads.cs @@ -0,0 +1,138 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract class Sve : AdvSimd /// Feature: FEAT_SVE Category: loads +{ + + /// T: [uint, int], [ulong, long] + public static unsafe Vector Compute8BitAddresses(Vector bases, Vector indices); // ADR + + /// T: uint, ulong + public static unsafe Vector Compute8BitAddresses(Vector bases, Vector indices); // ADR + + /// T: [uint, int], [ulong, long] + public static unsafe Vector Compute16BitAddresses(Vector bases, Vector indices); // ADR + + /// T: uint, ulong + public static unsafe Vector Compute16BitAddresses(Vector bases, Vector indices); // ADR + + /// T: [uint, int], [ulong, long] + public static unsafe Vector Compute32BitAddresses(Vector bases, Vector indices); // ADR + + /// T: uint, ulong + public static unsafe Vector Compute32BitAddresses(Vector bases, Vector indices); // ADR + + /// T: [uint, int], [ulong, long] + public static unsafe Vector Compute64BitAddresses(Vector bases, Vector indices); // ADR + + /// T: uint, ulong + public static unsafe Vector Compute64BitAddresses(Vector bases, Vector indices); // ADR + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector LoadVector(T* address); // LD1W or LD1D or LD1B or LD1H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, T* address); // LD1RQW or LD1RQD or LD1RQB or LD1RQH + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt16(Vector mask, sbyte* address); // LDNF1SB + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt32(Vector mask, sbyte* address); // LDNF1SB + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt64(Vector mask, sbyte* address); // LDNF1SB + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt16(Vector mask, sbyte* address); // LDNF1SB + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt32(Vector mask, sbyte* address); // LDNF1SB + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt64(Vector mask, sbyte* address); // LDNF1SB + + public static unsafe Vector LoadVectorSByteSignExtendToInt16(sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToInt32(sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToInt64(sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToUInt16(sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToUInt32(sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToUInt64(sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt16(Vector mask, byte* address); // LDNF1B + + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt32(Vector mask, byte* address); // LDNF1B + + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt64(Vector mask, byte* address); // LDNF1B + + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt16(Vector mask, byte* address); // LDNF1B + + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt32(Vector mask, byte* address); // LDNF1B + + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt64(Vector mask, byte* address); // LDNF1B + + public static unsafe Vector LoadVectorByteZeroExtendToInt16(byte* address); // LD1B + + public static unsafe Vector LoadVectorByteZeroExtendToInt32(byte* address); // LD1B + + public static unsafe Vector LoadVectorByteZeroExtendToInt64(byte* address); // LD1B + + public static unsafe Vector LoadVectorByteZeroExtendToUInt16(byte* address); // LD1B + + public static unsafe Vector LoadVectorByteZeroExtendToUInt32(byte* address); // LD1B + + public static unsafe Vector LoadVectorByteZeroExtendToUInt64(byte* address); // LD1B + + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToInt32(Vector mask, short* address); // LDNF1SH + + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToInt64(Vector mask, short* address); // LDNF1SH + + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToUInt32(Vector mask, short* address); // LDNF1SH + + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToUInt64(Vector mask, short* address); // LDNF1SH + + public static unsafe Vector LoadVectorInt16SignExtendToInt32(short* address); // LD1SH + + public static unsafe Vector LoadVectorInt16SignExtendToInt64(short* address); // LD1SH + + public static unsafe Vector
LoadVectorInt16SignExtendToUInt32(short* address); // LD1SH + + public static unsafe Vector LoadVectorInt16SignExtendToUInt64(short* address); // LD1SH + + public static unsafe Vector LoadVectorInt16NonFaultingZeroExtendToInt32(Vector mask, ushort* address); // LDNF1H + + public static unsafe Vector LoadVectorInt16NonFaultingZeroExtendToInt64(Vector mask, ushort* address); // LDNF1H + + public static unsafe Vector LoadVectorInt16NonFaultingZeroExtendToUInt32(Vector mask, ushort* address); // LDNF1H + + public static unsafe Vector LoadVectorInt16NonFaultingZeroExtendToUInt64(Vector mask, ushort* address); // LDNF1H + + public static unsafe Vector LoadVectorInt16ZeroExtendToInt32(ushort* address); // LD1H + + public static unsafe Vector LoadVectorInt16ZeroExtendToInt64(ushort* address); // LD1H + + public static unsafe Vector LoadVectorInt16ZeroExtendToUInt32(ushort* address); // LD1H + + public static unsafe Vector LoadVectorInt16ZeroExtendToUInt64(ushort* address); // LD1H + + public static unsafe Vector LoadVectorInt32NonFaultingSignExtendToInt64(Vector mask, int* address); // LDNF1SW + + public static unsafe Vector LoadVectorInt32NonFaultingSignExtendToUInt64(Vector mask, int* address); // LDNF1SW + + public static unsafe Vector LoadVectorInt32SignExtendToInt64(int* address); // LD1SW + + public static unsafe Vector LoadVectorInt32SignExtendToUInt64(int* address); // LD1SW + + public static unsafe Vector LoadVectorInt32NonFaultingZeroExtendToInt64(Vector mask, uint* address); // LDNF1W + + public static unsafe Vector LoadVectorInt32NonFaultingZeroExtendToUInt64(Vector mask, uint* address); // LDNF1W + + public static unsafe Vector LoadVectorInt32ZeroExtendToInt64(uint* address); // LD1W + + public static unsafe Vector LoadVectorInt32ZeroExtendToUInt64(uint* address); // LD1W + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector LoadVectorNonFaulting(Vector mask, T* address); // LDNF1W or LDNF1D or LDNF1B or LDNF1H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector LoadVectorNonTemporal(T* address); // LDNT1W or LDNT1D or LDNT1B or LDNT1H +} \ No newline at end of file diff --git a/sve_api/post_review/apiraw_FEAT_SVE__loads_sorted.cs b/sve_api/post_review/apiraw_FEAT_SVE__loads_sorted.cs new file mode 100644 index 0000000000000..8a59bfeabcd98 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__loads_sorted.cs @@ -0,0 +1,139 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract class Sve : AdvSimd /// Feature: FEAT_SVE Category: loads +{ + + /// T: [uint, int], [ulong, long] + public static unsafe Vector Compute16BitAddresses(Vector bases, Vector indices); // ADR + + /// T: uint, ulong + public static unsafe Vector Compute16BitAddresses(Vector bases, Vector indices); // ADR + + /// T: [uint, int], [ulong, long] + public static unsafe Vector Compute32BitAddresses(Vector bases, Vector indices); // ADR + + /// T: uint, ulong + public static unsafe Vector Compute32BitAddresses(Vector bases, Vector indices); // ADR + + /// T: [uint, int], [ulong, long] + public static unsafe Vector Compute64BitAddresses(Vector bases, Vector indices); // ADR + + /// T: uint, ulong + public static unsafe Vector Compute64BitAddresses(Vector bases, Vector indices); // ADR + + /// T: [uint, int], [ulong, long] + public static unsafe Vector Compute8BitAddresses(Vector bases, Vector indices); // ADR + + /// T: uint, ulong + public static unsafe Vector 
Compute8BitAddresses(Vector bases, Vector indices); // ADR + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector LoadVector(T* address); // LD1W or LD1D or LD1B or LD1H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector LoadVector128AndReplicateToVector(Vector mask, T* address); // LD1RQW or LD1RQD or LD1RQB or LD1RQH + + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt16(Vector mask, byte* address); // LDNF1B + + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt32(Vector mask, byte* address); // LDNF1B + + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToInt64(Vector mask, byte* address); // LDNF1B + + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt16(Vector mask, byte* address); // LDNF1B + + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt32(Vector mask, byte* address); // LDNF1B + + public static unsafe Vector LoadVectorByteNonFaultingZeroExtendToUInt64(Vector mask, byte* address); // LDNF1B + + public static unsafe Vector LoadVectorByteZeroExtendToInt16(byte* address); // LD1B + + public static unsafe Vector LoadVectorByteZeroExtendToInt32(byte* address); // LD1B + + public static unsafe Vector LoadVectorByteZeroExtendToInt64(byte* address); // LD1B + + public static unsafe Vector LoadVectorByteZeroExtendToUInt16(byte* address); // LD1B + + public static unsafe Vector LoadVectorByteZeroExtendToUInt32(byte* address); // LD1B + + public static unsafe Vector LoadVectorByteZeroExtendToUInt64(byte* address); // LD1B + + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToInt32(Vector mask, short* address); // LDNF1SH + + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToInt64(Vector mask, short* address); // LDNF1SH + + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToUInt32(Vector mask, short* address); // LDNF1SH + + public static unsafe Vector LoadVectorInt16NonFaultingSignExtendToUInt64(Vector mask, short* address); // LDNF1SH + + public static unsafe Vector LoadVectorInt16NonFaultingZeroExtendToInt32(Vector mask, ushort* address); // LDNF1H + + public static unsafe Vector LoadVectorInt16NonFaultingZeroExtendToInt64(Vector mask, ushort* address); // LDNF1H + + public static unsafe Vector LoadVectorInt16NonFaultingZeroExtendToUInt32(Vector mask, ushort* address); // LDNF1H + + public static unsafe Vector LoadVectorInt16NonFaultingZeroExtendToUInt64(Vector mask, ushort* address); // LDNF1H + + public static unsafe Vector LoadVectorInt16SignExtendToInt32(short* address); // LD1SH + + public static unsafe Vector LoadVectorInt16SignExtendToInt64(short* address); // LD1SH + + public static unsafe Vector LoadVectorInt16SignExtendToUInt32(short* address); // LD1SH + + public static unsafe Vector LoadVectorInt16SignExtendToUInt64(short* address); // LD1SH + + public static unsafe Vector LoadVectorInt32NonFaultingSignExtendToInt64(Vector mask, int* address); // LDNF1SW + + public static unsafe Vector LoadVectorInt32NonFaultingSignExtendToUInt64(Vector mask, int* address); // LDNF1SW + + public static unsafe Vector LoadVectorInt32NonFaultingZeroExtendToInt64(Vector mask, uint* address); // LDNF1W + + public static unsafe Vector LoadVectorInt32NonFaultingZeroExtendToUInt64(Vector mask, uint* address); // LDNF1W + + public static unsafe Vector LoadVectorInt32SignExtendToInt64(int* address); // LD1SW + + public static unsafe Vector 
LoadVectorInt32SignExtendToUInt64(int* address); // LD1SW + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector LoadVectorNonFaulting(Vector mask, T* address); // LDNF1W or LDNF1D or LDNF1B or LDNF1H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector LoadVectorNonTemporal(T* address); // LDNT1W or LDNT1D or LDNT1B or LDNT1H + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt16(Vector mask, sbyte* address); // LDNF1SB + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt32(Vector mask, sbyte* address); // LDNF1SB + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToInt64(Vector mask, sbyte* address); // LDNF1SB + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt16(Vector mask, sbyte* address); // LDNF1SB + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt32(Vector mask, sbyte* address); // LDNF1SB + + public static unsafe Vector LoadVectorSByteNonFaultingSignExtendToUInt64(Vector mask, sbyte* address); // LDNF1SB + + public static unsafe Vector LoadVectorSByteSignExtendToInt16(sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToInt32(sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToInt64(sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToUInt16(sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToUInt32(sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorSByteSignExtendToUInt64(sbyte* address); // LD1SB + + public static unsafe Vector LoadVectorInt16ZeroExtendToInt32(ushort* address); // LD1H + + public static unsafe Vector LoadVectorInt16ZeroExtendToInt64(ushort* address); // LD1H + + public static unsafe Vector LoadVectorInt16ZeroExtendToUInt32(ushort* address); // LD1H + + public static unsafe Vector LoadVectorInt16ZeroExtendToUInt64(ushort* address); // LD1H + + public static unsafe Vector LoadVectorInt32ZeroExtendToInt64(uint* address); // LD1W + + public static unsafe Vector LoadVectorInt32ZeroExtendToUInt64(uint* address); // LD1W + +} \ No newline at end of file
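The widening loads in both files fold the extension into the load itself: LD1SB targeting a 32-bit vector reads one signed byte per int lane and sign-extends it, so a load-then-unpack pair collapses into a single instruction. A sketch using the names proposed above (WidenSBytes is a hypothetical helper):

// Fused load+widen: one sbyte per 32-bit lane, sign-extended (LD1SB).
static unsafe Vector<int> WidenSBytes(sbyte* source)
{
    return Sve.LoadVectorSByteSignExtendToInt32(source);
}

The NonFaulting variants take the governing mask explicitly and map to LDNF1SB, suppressing the fault instead of raising it.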
public static unsafe Vector CompareEqual(Vector left, Vector right); // CMPEQ // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareGreaterThan(Vector left, Vector right); // FCMGT or CMPGT or CMPHI // predicated + + /// T: [sbyte, long], [short, long], [int, long], [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector CompareGreaterThan(Vector left, Vector right); // CMPGT or CMPHI // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right); // FCMGE or CMPGE or CMPHS // predicated + + /// T: [sbyte, long], [short, long], [int, long], [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right); // CMPGE or CMPHS // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareLessThan(Vector left, Vector right); // FCMGT or CMPGT or CMPHI // predicated + + /// T: [sbyte, long], [short, long], [int, long], [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector CompareLessThan(Vector left, Vector right); // CMPLT or CMPLO // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right); // FCMGE or CMPGE or CMPHS // predicated + + /// T: [sbyte, long], [short, long], [int, long], [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right); // CMPLE or CMPLS // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right); // FCMNE or CMPNE // predicated + + /// T: [sbyte, long], [short, long], [int, long] + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right); // CMPNE // predicated + + /// T: float, double + public static unsafe Vector CompareUnordered(Vector left, Vector right); // FCMUO // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultScalar, Vector data); // CLASTA // MOVPRFX + public static unsafe Vector ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, Vector defaultValues, Vector data); // CLASTA // MOVPRFX + public static unsafe T ConditionalExtractAfterLastActiveElement(Vector mask, T defaultValue, Vector data); // CLASTA // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ConditionalExtractLastActiveElement(Vector mask, Vector defaultScalar, Vector data); // CLASTB // MOVPRFX + public static unsafe Vector ConditionalExtractLastActiveElementAndReplicate(Vector mask, Vector defaultValues, Vector data); // CLASTB // MOVPRFX + public static unsafe T ConditionalExtractLastActiveElement(Vector mask, T defaultValue, Vector data); // CLASTB // MOVPRFX +} diff --git a/sve_api/post_review/apiraw_FEAT_SVE__mask_part2.cs b/sve_api/post_review/apiraw_FEAT_SVE__mask_part2.cs new file mode 100644 index 0000000000000..86a3e4b646977 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__mask_part2.cs @@ -0,0 +1,81 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// 
Feature: FEAT_SVE Category: mask +{ + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right); // SEL + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CreateBreakAfterMask(Vector totalMask, Vector fromMask); // BRKA // predicated + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CreateBreakAfterPropagateMask(Vector mask, Vector left, Vector right); // BRKPA + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CreateBreakBeforeMask(Vector totalMask, Vector fromMask); // BRKB // predicated + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CreateBreakBeforePropagateMask(Vector mask, Vector left, Vector right); // BRKPB + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanMask{8,16,32,64}Bit(int left, int right); // WHILELT + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanMask{8,16,32,64}Bit(long left, long right); // WHILELT + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanMask{8,16,32,64}Bit(uint left, uint right); // WHILELO + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanMask{8,16,32,64}Bit(ulong left, ulong right); // WHILELO + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanOrEqualMask{8,16,32,64}Bit(int left, int right); // WHILELE + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanOrEqualMask{8,16,32,64}Bit(long left, long right); // WHILELE + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanOrEqualMask{8,16,32,64}Bit(uint left, uint right); // WHILELS + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanOrEqualMask{8,16,32,64}Bit(ulong left, ulong right); // WHILELS + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe T ExtractAfterLastScalar(Vector value); // LASTA // predicated + public static unsafe Vector ExtractAfterLastVector(Vector value); // LASTA // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe T ExtractLastScalar(Vector value); // LASTB // predicated + public static unsafe Vector ExtractLastVector(Vector value); // LASTB // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index); // EXT // MOVPRFX + + public static unsafe Vector CreateFalseMaskByte(); // PFALSE + public static unsafe Vector CreateFalseMaskSByte(); // PFALSE + // repeat for Int16, UInt16, Int32, UInt32, Int64, UInt64, Single, Double + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateMaskForNextActiveElement(Vector totalMask, Vector fromMask); // PNEXT + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CreateMaskForFirstActiveElement(Vector totalMask, Vector fromMask); // PFIRST + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask); // PTEST + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask); //
PTEST + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask); // PTEST + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CreateBreakPropagateMask(Vector totalMask, Vector fromMask); // BRKN // predicated + + public static unsafe Vector CreateTrueMaskByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE + public static unsafe Vector CreateTrueMaskSByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE + // repeat for Int16, UInt16, Int32, UInt32, Int64, UInt64, Single, Double +} \ No newline at end of file diff --git a/sve_api/post_review/apiraw_FEAT_SVE__mask_sorted.cs b/sve_api/post_review/apiraw_FEAT_SVE__mask_sorted.cs new file mode 100644 index 0000000000000..053c86e57da94 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__mask_sorted.cs @@ -0,0 +1,159 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: mask +{ + + /// T: float, double + public static unsafe Vector AbsoluteCompareGreaterThan(Vector left, Vector right); // FACGT // predicated + + /// T: float, double + public static unsafe Vector AbsoluteCompareGreaterThanOrEqual(Vector left, Vector right); // FACGE // predicated + + /// T: float, double + public static unsafe Vector AbsoluteCompareLessThan(Vector left, Vector right); // FACGT // predicated + + /// T: float, double + public static unsafe Vector AbsoluteCompareLessThanOrEqual(Vector left, Vector right); // FACGE // predicated + + /// T: float, double, int, long, uint, ulong + public static unsafe Vector Compact(Vector mask, Vector value); // COMPACT + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareEqual(Vector left, Vector right); // FCMEQ or CMPEQ // predicated + + /// T: [sbyte, long], [short, long], [int, long] + public static unsafe Vector CompareEqual(Vector left, Vector right); // CMPEQ // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareGreaterThan(Vector left, Vector right); // FCMGT or CMPGT or CMPHI // predicated + + /// T: [sbyte, long], [short, long], [int, long], [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector CompareGreaterThan(Vector left, Vector right); // CMPGT or CMPHI // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right); // FCMGE or CMPGE or CMPHS // predicated + + /// T: [sbyte, long], [short, long], [int, long], [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector CompareGreaterThanOrEqual(Vector left, Vector right); // CMPGE or CMPHS // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareLessThan(Vector left, Vector right); // FCMGT or CMPGT or CMPHI // predicated + + /// T: [sbyte, long], [short, long], [int, long], [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector CompareLessThan(Vector left, Vector right); // CMPLT or CMPLO // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right); // FCMGE or CMPGE or CMPHS // predicated + + /// T: [sbyte, long], [short, long], 
[int, long], [byte, ulong], [ushort, ulong], [uint, ulong] + public static unsafe Vector CompareLessThanOrEqual(Vector left, Vector right); // CMPLE or CMPLS // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right); // FCMNE or CMPNE // predicated + + /// T: [sbyte, long], [short, long], [int, long] + public static unsafe Vector CompareNotEqualTo(Vector left, Vector right); // CMPNE // predicated + + /// T: float, double + public static unsafe Vector CompareUnordered(Vector left, Vector right); // FCMUO // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ConditionalExtractAfterLastActiveElement(Vector mask, Vector defaultScalar, Vector data); // CLASTA // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe T ConditionalExtractAfterLastActiveElement(Vector mask, T defaultValue, Vector data); // CLASTA // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ConditionalExtractAfterLastActiveElementAndReplicate(Vector mask, Vector defaultValues, Vector data); // CLASTA // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ConditionalExtractLastActiveElement(Vector mask, Vector defaultScalar, Vector data); // CLASTB // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe T ConditionalExtractLastActiveElement(Vector mask, T defaultValue, Vector data); // CLASTB // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ConditionalExtractLastActiveElementAndReplicate(Vector mask, Vector defaultValues, Vector data); // CLASTB // MOVPRFX + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ConditionalSelect(Vector mask, Vector left, Vector right); // SEL + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CreateBreakAfterMask(Vector totalMask, Vector fromMask); // BRKA // predicated + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CreateBreakAfterPropagateMask(Vector mask, Vector left, Vector right); // BRKPA + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CreateBreakBeforeMask(Vector totalMask, Vector fromMask); // BRKB // predicated + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CreateBreakBeforePropagateMask(Vector mask, Vector left, Vector right); // BRKPB + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CreateBreakPropagateMask(Vector totalMask, Vector fromMask); // BRKN // predicated + + public static unsafe Vector CreateFalseMaskByte(); // PFALSE + public static unsafe Vector CreateFalseMaskSByte(); // PFALSE + // repeat for Int16, UInt16, Int32, UInt32, Int64, UInt64, Single, Double + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector CreateMaskForFirstActiveElement(Vector totalMask, Vector fromMask); // PFIRST + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateMaskForNextActiveElement(Vector totalMask, Vector fromMask); // PNEXT + + public static unsafe Vector CreateTrueMaskByte([ConstantExpected] 
SveMaskPattern pattern = SveMaskPattern.All); // PTRUE + public static unsafe Vector CreateTrueMaskSByte([ConstantExpected] SveMaskPattern pattern = SveMaskPattern.All); // PTRUE + // repeat for Int16, UInt16, Int32, UInt32, Int64, UInt64, Single, Double + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanMask{8,16,32,64}Bit(int left, int right); // WHILELT + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanMask{8,16,32,64}Bit(long left, long right); // WHILELT + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanMask{8,16,32,64}Bit(uint left, uint right); // WHILELO + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanMask{8,16,32,64}Bit(ulong left, ulong right); // WHILELO + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanOrEqualMask{8,16,32,64}Bit(int left, int right); // WHILELE + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanOrEqualMask{8,16,32,64}Bit(long left, long right); // WHILELE + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanOrEqualMask{8,16,32,64}Bit(uint left, uint right); // WHILELS + + /// T: byte, ushort, uint, ulong + public static unsafe Vector CreateWhileLessThanOrEqualMask{8,16,32,64}Bit(ulong left, ulong right); // WHILELS + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe T ExtractAfterLastScalar(Vector value); // LASTA // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ExtractAfterLastVector(Vector value); // LASTA // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe T ExtractLastScalar(Vector value); // LASTB // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ExtractLastVector(Vector value); // LASTB // predicated + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector ExtractVector(Vector upper, Vector lower, [ConstantExpected] byte index); // EXT // MOVPRFX + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe bool TestAnyTrue(Vector leftMask, Vector rightMask); // PTEST + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe bool TestFirstTrue(Vector leftMask, Vector rightMask); // PTEST + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe bool TestLastTrue(Vector leftMask, Vector rightMask); // PTEST + +} diff --git a/sve_api/post_review/apiraw_FEAT_SVE__maths.cs b/sve_api/post_review/apiraw_FEAT_SVE__maths.cs new file mode 100644 index 0000000000000..8410d6fcd29ea --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__maths.cs @@ -0,0 +1,125 @@ +namespace System.Runtime.Intrinsics.Arm; + +public abstract partial class Sve : AdvSimd +{ + /// T: float, double, sbyte, short, int, long + public static unsafe Vector Abs(Vector value); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Add(Vector left, Vector right); + + /// T: float, double, long, ulong + public static unsafe Vector AddAcross(Vector value); + + /// T: [long,
sbyte], [long, short], [long, int], [ulong, byte], [ulong, ushort], [ulong, uint] + public static unsafe Vector AddAcross(Vector value); + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AddSaturate(Vector left, Vector right); + + /// T: float, double, int, long, uint, ulong + public static unsafe Vector Divide(Vector left, Vector right); + + /// T: [int, sbyte], [long, short], [uint, byte], [ulong, ushort] + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right); + + /// T: [int, sbyte], [long, short], [uint, byte], [ulong, ushort] + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// T: float, double + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right); + + /// T: float, double + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// T: float, double + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right); + + /// T: float, double + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right); + + /// T: float, double + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// T: float, double + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Max(Vector left, Vector right); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector MaxAcross(Vector value); + + /// T: float, double + public static unsafe Vector MaxNumber(Vector left, Vector right); + + /// T: float, double + public static unsafe Vector MaxNumberAcross(Vector value); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Min(Vector left, Vector right); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector MinAcross(Vector value); + + /// T: float, double + public static unsafe Vector MinNumber(Vector left, Vector right); + + /// T: float, double + public static unsafe Vector MinNumberAcross(Vector value); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Multiply(Vector left, Vector right); + + /// T: float, double + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right); + + /// T: float, double + public static unsafe Vector MultiplyExtended(Vector left, Vector right); + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right); + + /// T: float, double, sbyte, short, int, long + public static unsafe Vector Negate(Vector value); + + /// T: int, long + public static unsafe Vector SignExtend16(Vector value); + + public static unsafe Vector SignExtend32(Vector value); + + /// T: short, int, long + public static unsafe Vector SignExtend8(Vector value); + + /// T: [short, sbyte], [int, short], [long, int] + public static 
unsafe Vector SignExtendWideningLower(Vector value); + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector SignExtendWideningUpper(Vector value); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Subtract(Vector left, Vector right); + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// T: uint, ulong + public static unsafe Vector ZeroExtend16(Vector value); + + public static unsafe Vector ZeroExtend32(Vector value); + + /// T: ushort, uint, ulong + public static unsafe Vector ZeroExtend8(Vector value); + + /// T: [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector ZeroExtendWideningLower(Vector value); + + /// T: [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector ZeroExtendWideningUpper(Vector value); +} \ No newline at end of file diff --git a/sve_api/post_review/apiraw_FEAT_SVE__maths_sorted.cs b/sve_api/post_review/apiraw_FEAT_SVE__maths_sorted.cs new file mode 100644 index 0000000000000..465d0e1d61350 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__maths_sorted.cs @@ -0,0 +1,125 @@ +namespace System.Runtime.Intrinsics.Arm; + +public abstract partial class Sve : AdvSimd +{ + /// T: float, double, sbyte, short, int, long + public static unsafe Vector Abs(Vector value); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AbsoluteDifference(Vector left, Vector right); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Add(Vector left, Vector right); + + /// T: float, double, long, ulong + public static unsafe Vector AddAcross(Vector value); + + /// T: [long, sbyte], [long, short], [long, int], [ulong, byte], [ulong, ushort], [ulong, uint] + public static unsafe Vector AddAcross(Vector value); + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector AddSaturate(Vector left, Vector right); + + /// T: float, double, int, long, uint, ulong + public static unsafe Vector Divide(Vector left, Vector right); + + /// T: [int, sbyte], [long, short], [uint, byte], [ulong, ushort] + public static unsafe Vector DotProduct(Vector addend, Vector left, Vector right); + + /// T: [int, sbyte], [long, short], [uint, byte], [ulong, ushort] + public static unsafe Vector DotProductBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// T: float, double + public static unsafe Vector FusedMultiplyAdd(Vector addend, Vector left, Vector right); + + /// T: float, double + public static unsafe Vector FusedMultiplyAddBySelectedScalar(Vector addend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// T: float, double + public static unsafe Vector FusedMultiplyAddNegated(Vector addend, Vector left, Vector right); + + /// T: float, double + public static unsafe Vector FusedMultiplySubtract(Vector minuend, Vector left, Vector right); + + /// T: float, double + public static unsafe Vector FusedMultiplySubtractBySelectedScalar(Vector minuend, Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// T: float, double + public static unsafe Vector FusedMultiplySubtractNegated(Vector minuend, Vector left, Vector right); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Max(Vector left, Vector right); + + /// T: float, 
double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector MaxAcross(Vector value); + + /// T: float, double + public static unsafe Vector MaxNumber(Vector left, Vector right); + + /// T: float, double + public static unsafe Vector MaxNumberAcross(Vector value); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Min(Vector left, Vector right); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector MinAcross(Vector value); + + /// T: float, double + public static unsafe Vector MinNumber(Vector left, Vector right); + + /// T: float, double + public static unsafe Vector MinNumberAcross(Vector value); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Multiply(Vector left, Vector right); + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector MultiplyAdd(Vector addend, Vector left, Vector right); + + /// T: float, double + public static unsafe Vector MultiplyBySelectedScalar(Vector left, Vector right, [ConstantExpected] byte rightIndex); + + /// T: float, double + public static unsafe Vector MultiplyExtended(Vector left, Vector right); + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector MultiplySubtract(Vector minuend, Vector left, Vector right); + + /// T: float, double, sbyte, short, int, long + public static unsafe Vector Negate(Vector value); + + /// T: int, long + public static unsafe Vector SignExtend16(Vector value); + + public static unsafe Vector SignExtend32(Vector value); + + /// T: short, int, long + public static unsafe Vector SignExtend8(Vector value); + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector SignExtendWideningLower(Vector value); + + /// T: [short, sbyte], [int, short], [long, int] + public static unsafe Vector SignExtendWideningUpper(Vector value); + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector Subtract(Vector left, Vector right); + + /// T: sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe Vector SubtractSaturate(Vector left, Vector right); + + /// T: uint, ulong + public static unsafe Vector ZeroExtend16(Vector value); + + public static unsafe Vector ZeroExtend32(Vector value); + + /// T: ushort, uint, ulong + public static unsafe Vector ZeroExtend8(Vector value); + + /// T: [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector ZeroExtendWideningLower(Vector value); + + /// T: [ushort, byte], [uint, ushort], [ulong, uint] + public static unsafe Vector ZeroExtendWideningUpper(Vector value); +} \ No newline at end of file diff --git a/sve_api/post_review/apiraw_FEAT_SVE__scatterstores.cs b/sve_api/post_review/apiraw_FEAT_SVE__scatterstores.cs new file mode 100644 index 0000000000000..8a730c91eee5b --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__scatterstores.cs @@ -0,0 +1,101 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: scatterstores +{ + + /// T: [float, uint], [int, uint], [uint, uint], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data); // ST1W or ST1D + + /// T: [float, int], [int, int], [uint, int], [float, uint], [int, uint], [uint, uint], [double, long], [long, long], [ulong, 
long], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe void Scatter(Vector mask, T* address, Vector indices, Vector data); // ST1W or ST1D + + /// T: [float, int], [int, int], [uint, int], [float, uint], [int, uint], [uint, uint], [double, long], [long, long], [ulong, long], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe void ScatterWithByteOffsets(Vector mask, T* address, Vector offsets, Vector data); // ST1W or ST1D + + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, short* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, ushort* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, short* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, ushort* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data); // ST1W + + public static unsafe void Scatter32BitNarrowing(Vector mask, int* address, Vector indices, Vector data); // ST1W + + public static unsafe void Scatter32BitNarrowing(Vector mask, uint* address, Vector indices, Vector data); // ST1W + + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1H + + public static unsafe void 
Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, short* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, ushort* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, short* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, ushort* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data); // ST1W + + public static unsafe void Scatter32BitNarrowing(Vector mask, int* address, Vector indices, Vector data); // ST1W + + public static unsafe void Scatter32BitNarrowing(Vector mask, uint* address, Vector indices, Vector data); // ST1W + + /// total method signatures: 62 +}
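Illustrative usage (an editorial sketch, not part of the proposed diff): the listings above strip generic parameters, so this assumes the Vector<T> forms of Scatter and CreateTrueMaskInt32 shown in this file, plus the usual Sve.IsSupported guard; the method name ScatterEveryOther and the index choice are hypothetical.

using System.Numerics;
using System.Runtime.Intrinsics.Arm;

internal static class ScatterSketch
{
    // Scatters lane i of 'data' to destination[2 * i]; the caller must provide
    // at least 2 * Vector<int>.Count writable elements at 'destination'.
    public static unsafe void ScatterEveryOther(int* destination, Vector<int> data)
    {
        if (!Sve.IsSupported)
        {
            return; // scalar fallback elided
        }

        // All-true predicate: every lane participates in the store.
        Vector<int> mask = Sve.CreateTrueMaskInt32();

        // Element indices 0, 2, 4, ... (indices count elements, not bytes;
        // the byte-offset form is ScatterWithByteOffsets).
        int[] idx = new int[Vector<int>.Count];
        for (int i = 0; i < idx.Length; i++)
        {
            idx[i] = 2 * i;
        }
        Vector<int> indices = new Vector<int>(idx);

        Sve.Scatter(mask, destination, indices, data); // ST1W, scaled index form
    }
}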
diff --git a/sve_api/post_review/apiraw_FEAT_SVE__scatterstores_sorted.cs b/sve_api/post_review/apiraw_FEAT_SVE__scatterstores_sorted.cs new file mode 100644 index 0000000000000..9dd5a6437d2d4 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__scatterstores_sorted.cs @@ -0,0 +1,109 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: scatterstores +{ + + /// T: [float, uint], [int, uint], [uint, uint], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe void Scatter(Vector mask, Vector addresses, Vector data); // ST1W or ST1D + + /// T: [float, int], [int, int], [uint, int], [float, uint], [int, uint], [uint, uint], [double, long], [long, long], [ulong, long], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe void Scatter(Vector mask, T* address, Vector indices, Vector data); // ST1W or ST1D + +// scatter16 + + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, short* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, ushort* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, short* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, ushort* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, short* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, ushort* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, short* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter16BitNarrowing(Vector mask, ushort* address, Vector indices, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, short* address, Vector offsets, Vector data); // ST1H + + public static unsafe void Scatter16BitWithByteOffsetsNarrowing(Vector mask, ushort* address, Vector offsets, Vector data); // ST1H + +// scatter32 + + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1W + + public static unsafe void Scatter32BitNarrowing(Vector mask, int* address, Vector indices, Vector data); // ST1W + + public static unsafe void Scatter32BitNarrowing(Vector mask, uint* address, Vector indices, Vector data); // ST1W + + public static unsafe void Scatter32BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1W + + public static unsafe void Scatter32BitNarrowing(Vector mask, int* address, Vector indices, Vector data); // ST1W + + public static unsafe void Scatter32BitNarrowing(Vector mask, uint* address, Vector indices, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, int* address, Vector offsets, Vector data); // ST1W + + public static unsafe void Scatter32BitWithByteOffsetsNarrowing(Vector mask, uint* address, Vector offsets, Vector data); // ST1W + + +// scatter8 + + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1B + + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1B + + public static unsafe void Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1B + + public static unsafe void
Scatter8BitNarrowing(Vector mask, Vector addresses, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, sbyte* address, Vector offsets, Vector data); // ST1B + + public static unsafe void Scatter8BitWithByteOffsetsNarrowing(Vector mask, byte* address, Vector offsets, Vector data); // ST1B + + + /// T: [float, int], [int, int], [uint, int], [float, uint], [int, uint], [uint, uint], [double, long], [long, long], [ulong, long], [double, ulong], [long, ulong], [ulong, ulong] + public static unsafe void ScatterWithByteOffsets(Vector mask, T* address, Vector offsets, Vector data); // ST1W or ST1D + + /// total method signatures: 62 +} diff --git a/sve_api/post_review/apiraw_FEAT_SVE__stores.cs b/sve_api/post_review/apiraw_FEAT_SVE__stores.cs new file mode 100644 index 0000000000000..79d9971405e47 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__stores.cs @@ -0,0 +1,27 @@ +namespace System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: stores +{ + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void Store(Vector mask, T* address, Vector data); // ST1W or ST1D or ST1B or ST1H + + /// T: [short, sbyte], [int, short], [int, sbyte], [long, short], [long, int], [long, sbyte] + /// T: [ushort, byte], [uint, ushort], [uint, byte], [ulong, ushort], [ulong, uint], [ulong, byte] + public static unsafe void StoreNarrowing(Vector mask, T2* address, Vector data); // ST1B or ST1H or ST1W + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void StoreNonTemporal(Vector mask, T* address, Vector data); // STNT1W or STNT1D or STNT1B or STNT1H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void Store(Vector mask, T* address, (Vector Value1, Vector Value2) data); // ST2W or ST2D or ST2B or ST2H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void Store(Vector mask, T* address, (Vector Value1, Vector Value2, Vector Value3) data); // ST3W or ST3D or ST3B or ST3H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void Store(Vector mask, T* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); // ST4W or ST4D or ST4B or ST4H + + /// total method signatures: 17 +} diff --git a/sve_api/post_review/apiraw_FEAT_SVE__stores_sorted.cs b/sve_api/post_review/apiraw_FEAT_SVE__stores_sorted.cs new file mode 100644 index 0000000000000..901904ff21079 --- /dev/null +++ b/sve_api/post_review/apiraw_FEAT_SVE__stores_sorted.cs @@ -0,0 +1,27 @@ +namespace 
System.Runtime.Intrinsics.Arm; + +/// VectorT Summary +public abstract partial class Sve : AdvSimd /// Feature: FEAT_SVE Category: stores +{ + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void Store(Vector mask, T* address, Vector data); // ST1W or ST1D or ST1B or ST1H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void Store(Vector mask, T* address, (Vector Value1, Vector Value2) data); // ST2W or ST2D or ST2B or ST2H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void Store(Vector mask, T* address, (Vector Value1, Vector Value2, Vector Value3) data); // ST3W or ST3D or ST3B or ST3H + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void Store(Vector mask, T* address, (Vector Value1, Vector Value2, Vector Value3, Vector Value4) data); // ST4W or ST4D or ST4B or ST4H + + /// T: [short, sbyte], [int, short], [int, sbyte], [long, short], [long, int], [long, sbyte] + /// T: [ushort, byte], [uint, ushort], [uint, byte], [ulong, ushort], [ulong, uint], [ulong, byte] + public static unsafe void StoreNarrowing(Vector mask, T2* address, Vector data); // ST1B or ST1H or ST1W + + /// T: float, double, sbyte, short, int, long, byte, ushort, uint, ulong + public static unsafe void StoreNonTemporal(Vector mask, T* address, Vector data); // STNT1W or STNT1D or STNT1B or STNT1H + + /// total method signatures: 17 +}
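A companion sketch for the store surface above, under the same assumptions (Vector<T> generic forms of the listed signatures, an Sve.IsSupported guard); StoreSketch and both method names are hypothetical, and the interleaved layout noted for the two-vector overload follows ST2W semantics.

using System.Numerics;
using System.Runtime.Intrinsics.Arm;

internal static class StoreSketch
{
    // Predicated store: only the lanes selected by 'mask' are written.
    public static unsafe void StoreMasked(float* destination, Vector<float> mask, Vector<float> data)
    {
        if (!Sve.IsSupported)
        {
            return; // scalar fallback elided
        }
        Sve.Store(mask, destination, data); // ST1W
    }

    // Two-vector structured store: ST2W interleaves the registers in memory,
    // destination[0] = v1[0], destination[1] = v2[0], destination[2] = v1[1], ...
    // The caller must provide 2 * Vector<float>.Count writable elements.
    public static unsafe void StoreInterleaved(float* destination, Vector<float> v1, Vector<float> v2)
    {
        if (!Sve.IsSupported)
        {
            return;
        }
        Vector<float> mask = Sve.CreateTrueMaskSingle();
        Sve.Store(mask, destination, (v1, v2)); // ST2W
    }
}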